 07a723097c
			
		
	
	
	07a723097c
	
	
	
		
			
			Commit 5fd75a7850 (dma-mapping: remove unnecessary sync_single_range_*
in dma_map_ops) unified not only the dma_map_ops but also the
corresponding debug_dma_sync_* calls.  This led to spurious WARN()ings
like the following because the DMA debug code was no longer able to detect
the DMA buffer base address without the separate offset parameter:
WARNING: at lib/dma-debug.c:911 check_sync+0xce/0x446()
firewire_ohci 0000:04:00.0: DMA-API: device driver tries to sync DMA memory it has not allocated [device address=0x00000000cedaa400] [size=1024 bytes]
Call Trace: ...
 [<ffffffff811326a5>] check_sync+0xce/0x446
 [<ffffffff81132ad9>] debug_dma_sync_single_for_device+0x39/0x3b
 [<ffffffffa01d6e6a>] ohci_queue_iso+0x4f3/0x77d [firewire_ohci]
 ...
To fix this, unshare the sync_single_* and sync_single_range_*
implementations so that we are able to call the correct debug_dma_sync_*
functions.
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Clemens Ladisch <clemens@ladisch.de>
Cc: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
		
	
			
		
			
				
	
	
		
			178 lines
		
	
	
	
		
			5.2 KiB
			
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			178 lines
		
	
	
	
		
			5.2 KiB
			
		
	
	
	
		
			C
		
	
	
	
	
	
| #ifndef _ASM_GENERIC_DMA_MAPPING_H
 | |
| #define _ASM_GENERIC_DMA_MAPPING_H
 | |
| 
 | |
| #include <linux/kmemcheck.h>
 | |
| #include <linux/scatterlist.h>
 | |
| #include <linux/dma-debug.h>
 | |
| #include <linux/dma-attrs.h>
 | |
| 
 | |
| static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
 | |
| 					      size_t size,
 | |
| 					      enum dma_data_direction dir,
 | |
| 					      struct dma_attrs *attrs)
 | |
| {
 | |
| 	struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 	dma_addr_t addr;
 | |
| 
 | |
| 	kmemcheck_mark_initialized(ptr, size);
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	addr = ops->map_page(dev, virt_to_page(ptr),
 | |
| 			     (unsigned long)ptr & ~PAGE_MASK, size,
 | |
| 			     dir, attrs);
 | |
| 	debug_dma_map_page(dev, virt_to_page(ptr),
 | |
| 			   (unsigned long)ptr & ~PAGE_MASK, size,
 | |
| 			   dir, addr, true);
 | |
| 	return addr;
 | |
| }
 | |
| 
 | |
| static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
 | |
| 					  size_t size,
 | |
| 					  enum dma_data_direction dir,
 | |
| 					  struct dma_attrs *attrs)
 | |
| {
 | |
| 	struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	if (ops->unmap_page)
 | |
| 		ops->unmap_page(dev, addr, size, dir, attrs);
 | |
| 	debug_dma_unmap_page(dev, addr, size, dir, true);
 | |
| }
 | |
| 
 | |
| static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
 | |
| 				   int nents, enum dma_data_direction dir,
 | |
| 				   struct dma_attrs *attrs)
 | |
| {
 | |
| 	struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 	int i, ents;
 | |
| 	struct scatterlist *s;
 | |
| 
 | |
| 	for_each_sg(sg, s, nents, i)
 | |
| 		kmemcheck_mark_initialized(sg_virt(s), s->length);
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	ents = ops->map_sg(dev, sg, nents, dir, attrs);
 | |
| 	debug_dma_map_sg(dev, sg, nents, ents, dir);
 | |
| 
 | |
| 	return ents;
 | |
| }
 | |
| 
 | |
| static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
 | |
| 				      int nents, enum dma_data_direction dir,
 | |
| 				      struct dma_attrs *attrs)
 | |
| {
 | |
| 	struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	debug_dma_unmap_sg(dev, sg, nents, dir);
 | |
| 	if (ops->unmap_sg)
 | |
| 		ops->unmap_sg(dev, sg, nents, dir, attrs);
 | |
| }
 | |
| 
 | |
| static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 | |
| 				      size_t offset, size_t size,
 | |
| 				      enum dma_data_direction dir)
 | |
| {
 | |
| 	struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 	dma_addr_t addr;
 | |
| 
 | |
| 	kmemcheck_mark_initialized(page_address(page) + offset, size);
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	addr = ops->map_page(dev, page, offset, size, dir, NULL);
 | |
| 	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 | |
| 
 | |
| 	return addr;
 | |
| }
 | |
| 
 | |
| static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
 | |
| 				  size_t size, enum dma_data_direction dir)
 | |
| {
 | |
| 	struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	if (ops->unmap_page)
 | |
| 		ops->unmap_page(dev, addr, size, dir, NULL);
 | |
| 	debug_dma_unmap_page(dev, addr, size, dir, false);
 | |
| }
 | |
| 
 | |
| static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
 | |
| 					   size_t size,
 | |
| 					   enum dma_data_direction dir)
 | |
| {
 | |
| 	struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	if (ops->sync_single_for_cpu)
 | |
| 		ops->sync_single_for_cpu(dev, addr, size, dir);
 | |
| 	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
 | |
| }
 | |
| 
 | |
| static inline void dma_sync_single_for_device(struct device *dev,
 | |
| 					      dma_addr_t addr, size_t size,
 | |
| 					      enum dma_data_direction dir)
 | |
| {
 | |
| 	struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	if (ops->sync_single_for_device)
 | |
| 		ops->sync_single_for_device(dev, addr, size, dir);
 | |
| 	debug_dma_sync_single_for_device(dev, addr, size, dir);
 | |
| }
 | |
| 
 | |
| static inline void dma_sync_single_range_for_cpu(struct device *dev,
 | |
| 						 dma_addr_t addr,
 | |
| 						 unsigned long offset,
 | |
| 						 size_t size,
 | |
| 						 enum dma_data_direction dir)
 | |
| {
 | |
| 	const struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	if (ops->sync_single_for_cpu)
 | |
| 		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
 | |
| 	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
 | |
| }
 | |
| 
 | |
| static inline void dma_sync_single_range_for_device(struct device *dev,
 | |
| 						    dma_addr_t addr,
 | |
| 						    unsigned long offset,
 | |
| 						    size_t size,
 | |
| 						    enum dma_data_direction dir)
 | |
| {
 | |
| 	const struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	if (ops->sync_single_for_device)
 | |
| 		ops->sync_single_for_device(dev, addr + offset, size, dir);
 | |
| 	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
 | |
| }
 | |
| 
 | |
| static inline void
 | |
| dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 | |
| 		    int nelems, enum dma_data_direction dir)
 | |
| {
 | |
| 	struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	if (ops->sync_sg_for_cpu)
 | |
| 		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
 | |
| 	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
 | |
| }
 | |
| 
 | |
| static inline void
 | |
| dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 | |
| 		       int nelems, enum dma_data_direction dir)
 | |
| {
 | |
| 	struct dma_map_ops *ops = get_dma_ops(dev);
 | |
| 
 | |
| 	BUG_ON(!valid_dma_direction(dir));
 | |
| 	if (ops->sync_sg_for_device)
 | |
| 		ops->sync_sg_for_device(dev, sg, nelems, dir);
 | |
| 	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
 | |
| 
 | |
| }
 | |
| 
 | |
/*
 * Convenience wrappers for the common case of no DMA attributes:
 * each expands to its *_attrs sibling with a NULL attrs argument.
 */
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)

#endif /* _ASM_GENERIC_DMA_MAPPING_H */
 |