#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

/*
 * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
 * used internally by the DMA-mapping API to provide DMA addresses. They
 * must not be used by drivers.
 */
#ifndef __arch_page_to_dma
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
}

static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
{
	return pfn_to_page(__bus_to_pfn(addr));
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return __arch_page_to_dma(dev, page);
}

static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_page(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 * Private support functions: these are not part of the API and are
 * liable to change.  Drivers must not use these.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_dev_to_cpu(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_dev_to_cpu(page, off, size, dir);
}
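
/*
 * Illustrative sketch: drivers never call the __dma_* helpers above
 * directly; the ownership transitions they implement are driven through
 * the public dma_map/dma_unmap/dma_sync calls declared below.  For a
 * hypothetical buffer "buf" of "len" bytes:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	...			(device owns buf; the CPU must not touch it)
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 *	...			(CPU owns buf again)
 */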

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24 bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	if (mask < ISA_DMA_THRESHOLD)
		return 0;
	return 1;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_DMABOUNCE
	if (dev->archdata.dmabounce) {
		if (dma_mask >= ISA_DMA_THRESHOLD)
			return 0;
		else
			return -EIO;
	}
#endif
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
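
/*
 * Illustrative sketch, from a hypothetical driver's probe routine: a
 * bus-mastering device that can only drive 24 address lines declares
 * that limit before any mapping is made:
 *
 *	if (dma_set_mask(dev, 0x00ffffff))
 *		return -EIO;	(platform cannot supply such memory)
 */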

static inline int dma_get_cache_alignment(void)
{
	return 32;
}

static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
{
	return !!arch_is_coherent();
}

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}
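
/*
 * Illustrative use of the error convention above: every streaming
 * mapping should be checked before the address is handed to hardware,
 * "dma" being a handle returned by one of the dma_map_* routines:
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */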

/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
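
/*
 * Illustrative sketch: a driver might keep a descriptor ring in
 * coherent memory so CPU and device always see the same data
 * (RING_BYTES, "ring" and "ring_dma" are hypothetical driver names):
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */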

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * are illegal during and after this call executes.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
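
/*
 * Illustrative sketch: dma_mmap_coherent() is typically called from a
 * driver's mmap file operation to expose a coherent buffer to user
 * space ("foo_priv" and its fields are hypothetical):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->ring,
 *					 priv->ring_dma, RING_BYTES);
 *	}
 */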

/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
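
/*
 * Illustrative note: the writecombine variant trades strict ordering
 * for streaming write performance, which suits frame buffers and other
 * write-mostly buffers; a hypothetical LCD driver would simply
 * substitute dma_alloc_writecombine() and dma_mmap_writecombine() for
 * the coherent calls shown above.
 */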

#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 *
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 *
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long);

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not. The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 *
 */
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
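
/*
 * Illustrative sketch of a platform implementation, assuming a
 * hypothetical machine whose inbound PCI window covers only the first
 * 64MB of RAM (the bound and the bus check are made up for the example;
 * pci_bus_type comes from <linux/pci.h>, SZ_64M from <asm/sizes.h>):
 *
 *	int dma_needs_bounce(struct device *dev, dma_addr_t dma, size_t size)
 *	{
 *		return (dev->bus == &pci_bus_type) && ((dma + size) >= SZ_64M);
 *	}
 */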

/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
		enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	__dma_single_cpu_to_dev(cpu_addr, size, dir);

	return virt_to_dma(dev, cpu_addr);
}
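
/*
 * Illustrative sketch: a transmit path might map a kernel buffer,
 * start the device on it, and unmap it from the completion handler
 * ("buf", "len" and foo_hw_start_tx() are hypothetical):
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	foo_hw_start_tx(dev, dma, len);
 *	...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);	(on completion)
 */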

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	__dma_page_cpu_to_dev(page, offset, size, dir);

	return page_to_dma(dev, page) + offset;
}
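
/*
 * Illustrative sketch: dma_map_page() suits buffers described by a page
 * plus offset, where no kernel direct-mapped address is to hand ("frag"
 * is a hypothetical fragment descriptor):
 *
 *	dma_addr_t dma = dma_map_page(dev, frag->page, frag->offset,
 *				      frag->size, DMA_TO_DEVICE);
 */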

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
}

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK,
		size, dir);
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a dma_sync_single_for_device(), and then the
 * device again owns the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
		return;

	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
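
/*
 * Illustrative sketch of the interrogate-without-unmapping pattern
 * described above, e.g. peeking at a DMA'd header before deciding
 * whether the transfer is complete ("buf", "hdr_len" and
 * foo_parse_header() are hypothetical):
 *
 *	dma_sync_single_for_cpu(dev, dma, hdr_len, DMA_FROM_DEVICE);
 *	ready = foo_parse_header(buf);		(CPU may read now)
 *	dma_sync_single_for_device(dev, dma, hdr_len, DMA_FROM_DEVICE);
 *	...					(device owns buffer again)
 */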

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
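
/*
 * Illustrative sketch: mapping a two-segment buffer with a scatterlist
 * (sg_init_table() and sg_set_page() come from <linux/scatterlist.h>,
 * included above; the page and length names are hypothetical):
 *
 *	struct scatterlist sg[2];
 *	int count;
 *
 *	sg_init_table(sg, 2);
 *	sg_set_page(&sg[0], page0, PAGE_SIZE, 0);
 *	sg_set_page(&sg[1], page1, len1, 0);
 *	count = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
 */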

#endif /* __KERNEL__ */
#endif