Merge branch 'sglist-arch' into for-linus
commit 3eed13fd93
110 changed files with 870 additions and 597 deletions
		|  | @ -514,7 +514,7 @@ With scatterlists, you map a region gathered from several regions by: | |||
| 	int i, count = pci_map_sg(dev, sglist, nents, direction); | ||||
| 	struct scatterlist *sg; | ||||
| 
 | ||||
| 	for (i = 0, sg = sglist; i < count; i++, sg++) { | ||||
| 	for_each_sg(sglist, sg, count, i) { | ||||
| 		hw_address[i] = sg_dma_address(sg); | ||||
| 		hw_len[i] = sg_dma_len(sg); | ||||
| 	} | ||||
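The documentation hunk above captures the point of the whole series: a scatterlist may now be built from several chained blocks, so advancing with plain sg++ can run off the end of one block instead of following the link into the next. A minimal model of the two helpers every hunk below relies on, assuming the 2007-era convention that a chain entry tags the low bit of its page pointer (sketch only; the in-tree versions add debug checks):

	/* Sketch: a chain entry's page pointer has its low bit set and
	 * otherwise points at the first entry of the next sg block. */
	static inline struct scatterlist *sg_next(struct scatterlist *sg)
	{
		sg++;
		if (unlikely((unsigned long)sg->page & 0x01))
			sg = (struct scatterlist *)((unsigned long)sg->page & ~0x01UL);
		return sg;
	}

	/* Visit nr entries, following chain links transparently. */
	#define for_each_sg(sglist, sg, nr, __i) \
		for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))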
|  |  | |||
|  | @ -396,7 +396,7 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) | |||
| 		printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents, | ||||
| 		       startsg->dma_address, startsg->dma_length, | ||||
| 		       sba_sg_address(startsg)); | ||||
| 		startsg++; | ||||
| 		startsg = sg_next(startsg); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
|  | @ -409,7 +409,7 @@ sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) | |||
| 	while (the_nents-- > 0) { | ||||
| 		if (sba_sg_address(the_sg) == 0x0UL) | ||||
| 			sba_dump_sg(NULL, startsg, nents); | ||||
| 		the_sg++; | ||||
| 		the_sg = sg_next(the_sg); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
|  | @ -1201,7 +1201,7 @@ sba_fill_pdir( | |||
| 			u32 pide = startsg->dma_address & ~PIDE_FLAG; | ||||
| 			dma_offset = (unsigned long) pide & ~iovp_mask; | ||||
| 			startsg->dma_address = 0; | ||||
| 			dma_sg++; | ||||
| 			dma_sg = sg_next(dma_sg); | ||||
| 			dma_sg->dma_address = pide | ioc->ibase; | ||||
| 			pdirp = &(ioc->pdir_base[pide >> iovp_shift]); | ||||
| 			n_mappings++; | ||||
|  | @ -1228,7 +1228,7 @@ sba_fill_pdir( | |||
| 				pdirp++; | ||||
| 			} while (cnt > 0); | ||||
| 		} | ||||
| 		startsg++; | ||||
| 		startsg = sg_next(startsg); | ||||
| 	} | ||||
| 	/* force pdir update */ | ||||
| 	wmb(); | ||||
|  | @ -1297,7 +1297,7 @@ sba_coalesce_chunks( struct ioc *ioc, | |||
| 		while (--nents > 0) { | ||||
| 			unsigned long vaddr;	/* tmp */ | ||||
| 
 | ||||
| 			startsg++; | ||||
| 			startsg = sg_next(startsg); | ||||
| 
 | ||||
| 			/* PARANOID */ | ||||
| 			startsg->dma_address = startsg->dma_length = 0; | ||||
|  | @ -1407,7 +1407,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di | |||
| #ifdef ALLOW_IOV_BYPASS_SG | ||||
| 	ASSERT(to_pci_dev(dev)->dma_mask); | ||||
| 	if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) { | ||||
| 		for (sg = sglist ; filled < nents ; filled++, sg++){ | ||||
| 		for_each_sg(sglist, sg, nents, filled) { | ||||
| 			sg->dma_length = sg->length; | ||||
| 			sg->dma_address = virt_to_phys(sba_sg_address(sg)); | ||||
| 		} | ||||
|  | @ -1501,7 +1501,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in | |||
| 	while (nents && sglist->dma_length) { | ||||
| 
 | ||||
| 		sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir); | ||||
| 		sglist++; | ||||
| 		sglist = sg_next(sglist); | ||||
| 		nents--; | ||||
| 	} | ||||
| 
 | ||||
|  |  | |||
|  | @ -360,6 +360,7 @@ static struct scsi_host_template driver_template = { | |||
| 	.max_sectors		= 1024, | ||||
| 	.cmd_per_lun		= SIMSCSI_REQ_QUEUE_LEN, | ||||
| 	.use_clustering		= DISABLE_CLUSTERING, | ||||
| 	.use_sg_chaining	= ENABLE_SG_CHAINING, | ||||
| }; | ||||
| 
 | ||||
| static int __init | ||||
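Chained lists are opt-in on the SCSI side: a low-level driver that can walk them declares it in its host template, as simscsi does above, and hosts that leave the flag unset keep receiving flat single-block tables. A sketch of the opt-in (every field here except use_sg_chaining is an illustrative placeholder, and example_queuecommand is hypothetical):

	static struct scsi_host_template example_template = {
		.module			= THIS_MODULE,
		.name			= "example",		/* hypothetical driver */
		.queuecommand		= example_queuecommand,	/* hypothetical */
		.this_id		= -1,
		.sg_tablesize		= 128,
		.use_sg_chaining	= ENABLE_SG_CHAINING,	/* opt in */
	};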
|  |  | |||
|  | @ -218,16 +218,17 @@ EXPORT_SYMBOL(sn_dma_unmap_single); | |||
|  * | ||||
|  * Unmap a set of streaming mode DMA translations. | ||||
|  */ | ||||
| void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg, | ||||
| void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, | ||||
| 		     int nhwentries, int direction) | ||||
| { | ||||
| 	int i; | ||||
| 	struct pci_dev *pdev = to_pci_dev(dev); | ||||
| 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | ||||
| 	struct scatterlist *sg; | ||||
| 
 | ||||
| 	BUG_ON(dev->bus != &pci_bus_type); | ||||
| 
 | ||||
| 	for (i = 0; i < nhwentries; i++, sg++) { | ||||
| 	for_each_sg(sgl, sg, nhwentries, i) { | ||||
| 		provider->dma_unmap(pdev, sg->dma_address, direction); | ||||
| 		sg->dma_address = (dma_addr_t) NULL; | ||||
| 		sg->dma_length = 0; | ||||
|  | @ -244,11 +245,11 @@ EXPORT_SYMBOL(sn_dma_unmap_sg); | |||
|  * | ||||
|  * Maps each entry of @sg for DMA. | ||||
|  */ | ||||
| int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | ||||
| int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries, | ||||
| 		  int direction) | ||||
| { | ||||
| 	unsigned long phys_addr; | ||||
| 	struct scatterlist *saved_sg = sg; | ||||
| 	struct scatterlist *saved_sg = sgl, *sg; | ||||
| 	struct pci_dev *pdev = to_pci_dev(dev); | ||||
| 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | ||||
| 	int i; | ||||
|  | @ -258,7 +259,7 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | |||
| 	/*
 | ||||
| 	 * Setup a DMA address for each entry in the scatterlist. | ||||
| 	 */ | ||||
| 	for (i = 0; i < nhwentries; i++, sg++) { | ||||
| 	for_each_sg(sgl, sg, nhwentries, i) { | ||||
| 		phys_addr = SG_ENT_PHYS_ADDRESS(sg); | ||||
| 		sg->dma_address = provider->dma_map(pdev, | ||||
| 						    phys_addr, sg->length, | ||||
|  |  | |||
|  | @ -154,12 +154,13 @@ static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr, | |||
| { | ||||
| } | ||||
| 
 | ||||
| static int dma_direct_map_sg(struct device *dev, struct scatterlist *sg, | ||||
| static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, | ||||
| 			     int nents, enum dma_data_direction direction) | ||||
| { | ||||
| 	struct scatterlist *sg; | ||||
| 	int i; | ||||
| 
 | ||||
| 	for (i = 0; i < nents; i++, sg++) { | ||||
| 	for_each_sg(sgl, sg, nents, i) { | ||||
| 		sg->dma_address = (page_to_phys(sg->page) + sg->offset) | | ||||
| 			dma_direct_offset; | ||||
| 		sg->dma_length = sg->length; | ||||
|  |  | |||
|  | @ -87,15 +87,16 @@ static void ibmebus_unmap_single(struct device *dev, | |||
| } | ||||
| 
 | ||||
| static int ibmebus_map_sg(struct device *dev, | ||||
| 			  struct scatterlist *sg, | ||||
| 			  struct scatterlist *sgl, | ||||
| 			  int nents, enum dma_data_direction direction) | ||||
| { | ||||
| 	struct scatterlist *sg; | ||||
| 	int i; | ||||
| 
 | ||||
| 	for (i = 0; i < nents; i++) { | ||||
| 		sg[i].dma_address = (dma_addr_t)page_address(sg[i].page) | ||||
| 			+ sg[i].offset; | ||||
| 		sg[i].dma_length = sg[i].length; | ||||
| 	for_each_sg(sgl, sg, nents, i) { | ||||
| 		sg->dma_address = (dma_addr_t)page_address(sg->page) | ||||
| 			+ sg->offset; | ||||
| 		sg->dma_length = sg->length; | ||||
| 	} | ||||
| 
 | ||||
| 	return nents; | ||||
|  |  | |||
|  | @ -277,7 +277,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist, | |||
| 	dma_addr_t dma_next = 0, dma_addr; | ||||
| 	unsigned long flags; | ||||
| 	struct scatterlist *s, *outs, *segstart; | ||||
| 	int outcount, incount; | ||||
| 	int outcount, incount, i; | ||||
| 	unsigned long handle; | ||||
| 
 | ||||
| 	BUG_ON(direction == DMA_NONE); | ||||
|  | @ -297,7 +297,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist, | |||
| 
 | ||||
| 	spin_lock_irqsave(&(tbl->it_lock), flags); | ||||
| 
 | ||||
| 	for (s = outs; nelems; nelems--, s++) { | ||||
| 	for_each_sg(sglist, s, nelems, i) { | ||||
| 		unsigned long vaddr, npages, entry, slen; | ||||
| 
 | ||||
| 		slen = s->length; | ||||
|  | @ -341,7 +341,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist, | |||
| 			if (novmerge || (dma_addr != dma_next)) { | ||||
| 				/* Can't merge: create a new segment */ | ||||
| 				segstart = s; | ||||
| 				outcount++; outs++; | ||||
| 				outcount++; | ||||
| 				outs = sg_next(outs); | ||||
| 				DBG("    can't merge, new segment.\n"); | ||||
| 			} else { | ||||
| 				outs->dma_length += s->length; | ||||
|  | @ -374,7 +375,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist, | |||
| 	 * next entry of the sglist if we didn't fill the list completely | ||||
| 	 */ | ||||
| 	if (outcount < incount) { | ||||
| 		outs++; | ||||
| 		outs = sg_next(outs); | ||||
| 		outs->dma_address = DMA_ERROR_CODE; | ||||
| 		outs->dma_length = 0; | ||||
| 	} | ||||
|  | @ -385,7 +386,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist, | |||
| 	return outcount; | ||||
| 
 | ||||
|  failure: | ||||
| 	for (s = &sglist[0]; s <= outs; s++) { | ||||
| 	for_each_sg(sglist, s, nelems, i) { | ||||
| 		if (s->dma_length != 0) { | ||||
| 			unsigned long vaddr, npages; | ||||
| 
 | ||||
|  | @ -395,6 +396,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist, | |||
| 			s->dma_address = DMA_ERROR_CODE; | ||||
| 			s->dma_length = 0; | ||||
| 		} | ||||
| 		if (s == outs) | ||||
| 			break; | ||||
| 	} | ||||
| 	spin_unlock_irqrestore(&(tbl->it_lock), flags); | ||||
| 	return 0; | ||||
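The failure path above shows why the conversion is not purely mechanical: the old loop `for (s = &sglist[0]; s <= outs; s++)` relied on pointer comparison across what is no longer one contiguous array. With chaining, the bounded-cleanup idiom is to walk with the iterator and break explicitly at the last segment that was produced, roughly as follows (undo_one() is a hypothetical stand-in for the __iommu_free() logic above):

	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0)
			undo_one(s);	/* hypothetical per-entry cleanup */
		if (s == outs)		/* last segment we produced */
			break;
	}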
|  | @ -404,6 +407,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist, | |||
| void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, | ||||
| 		int nelems, enum dma_data_direction direction) | ||||
| { | ||||
| 	struct scatterlist *sg; | ||||
| 	unsigned long flags; | ||||
| 
 | ||||
| 	BUG_ON(direction == DMA_NONE); | ||||
|  | @ -413,15 +417,16 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, | |||
| 
 | ||||
| 	spin_lock_irqsave(&(tbl->it_lock), flags); | ||||
| 
 | ||||
| 	sg = sglist; | ||||
| 	while (nelems--) { | ||||
| 		unsigned int npages; | ||||
| 		dma_addr_t dma_handle = sglist->dma_address; | ||||
| 		dma_addr_t dma_handle = sg->dma_address; | ||||
| 
 | ||||
| 		if (sglist->dma_length == 0) | ||||
| 		if (sg->dma_length == 0) | ||||
| 			break; | ||||
| 		npages = iommu_num_pages(dma_handle,sglist->dma_length); | ||||
| 		npages = iommu_num_pages(dma_handle, sg->dma_length); | ||||
| 		__iommu_free(tbl, dma_handle, npages); | ||||
| 		sglist++; | ||||
| 		sg = sg_next(sg); | ||||
| 	} | ||||
| 
 | ||||
| 	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
 | ||||
|  |  | |||
|  | @ -616,17 +616,18 @@ static void ps3_unmap_single(struct device *_dev, dma_addr_t dma_addr, | |||
| 	} | ||||
| } | ||||
| 
 | ||||
| static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sg, int nents, | ||||
| 	enum dma_data_direction direction) | ||||
| static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl, | ||||
| 	int nents, enum dma_data_direction direction) | ||||
| { | ||||
| #if defined(CONFIG_PS3_DYNAMIC_DMA) | ||||
| 	BUG_ON("do"); | ||||
| 	return -EPERM; | ||||
| #else | ||||
| 	struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); | ||||
| 	struct scatterlist *sg; | ||||
| 	int i; | ||||
| 
 | ||||
| 	for (i = 0; i < nents; i++, sg++) { | ||||
| 	for_each_sg(sgl, sg, nents, i) { | ||||
| 		int result = ps3_dma_map(dev->d_region, | ||||
| 			page_to_phys(sg->page) + sg->offset, sg->length, | ||||
| 					 &sg->dma_address, 0); | ||||
|  |  | |||
|  | @ -35,6 +35,7 @@ | |||
| #include <linux/slab.h> | ||||
| #include <linux/pci.h>		/* struct pci_dev */ | ||||
| #include <linux/proc_fs.h> | ||||
| #include <linux/scatterlist.h> | ||||
| 
 | ||||
| #include <asm/io.h> | ||||
| #include <asm/vaddrs.h> | ||||
|  | @ -717,19 +718,19 @@ void pci_unmap_page(struct pci_dev *hwdev, | |||
|  * Device ownership issues as mentioned above for pci_map_single are | ||||
|  * the same here. | ||||
|  */ | ||||
| int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, | ||||
| int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, | ||||
|     int direction) | ||||
| { | ||||
| 	struct scatterlist *sg; | ||||
| 	int n; | ||||
| 
 | ||||
| 	BUG_ON(direction == PCI_DMA_NONE); | ||||
| 	/* IIep is write-through, not flushing. */ | ||||
| 	for (n = 0; n < nents; n++) { | ||||
| 	for_each_sg(sgl, sg, nents, n) { | ||||
| 		BUG_ON(page_address(sg->page) == NULL); | ||||
| 		sg->dvma_address = | ||||
| 			virt_to_phys(page_address(sg->page)) + sg->offset; | ||||
| 		sg->dvma_length = sg->length; | ||||
| 		sg++; | ||||
| 	} | ||||
| 	return nents; | ||||
| } | ||||
|  | @ -738,19 +739,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, | |||
|  * Again, cpu read rules concerning calls here are the same as for | ||||
|  * pci_unmap_single() above. | ||||
|  */ | ||||
| void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, | ||||
| void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, | ||||
|     int direction) | ||||
| { | ||||
| 	struct scatterlist *sg; | ||||
| 	int n; | ||||
| 
 | ||||
| 	BUG_ON(direction == PCI_DMA_NONE); | ||||
| 	if (direction != PCI_DMA_TODEVICE) { | ||||
| 		for (n = 0; n < nents; n++) { | ||||
| 		for_each_sg(sgl, sg, nents, n) { | ||||
| 			BUG_ON(page_address(sg->page) == NULL); | ||||
| 			mmu_inval_dma_area( | ||||
| 			    (unsigned long) page_address(sg->page), | ||||
| 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK); | ||||
| 			sg++; | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | @ -789,34 +790,34 @@ void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t | |||
|  * The same as pci_dma_sync_single_* but for a scatter-gather list, | ||||
|  * same rules and usage. | ||||
|  */ | ||||
| void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction) | ||||
| void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) | ||||
| { | ||||
| 	struct scatterlist *sg; | ||||
| 	int n; | ||||
| 
 | ||||
| 	BUG_ON(direction == PCI_DMA_NONE); | ||||
| 	if (direction != PCI_DMA_TODEVICE) { | ||||
| 		for (n = 0; n < nents; n++) { | ||||
| 		for_each_sg(sgl, sg, nents, n) { | ||||
| 			BUG_ON(page_address(sg->page) == NULL); | ||||
| 			mmu_inval_dma_area( | ||||
| 			    (unsigned long) page_address(sg->page), | ||||
| 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK); | ||||
| 			sg++; | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction) | ||||
| void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) | ||||
| { | ||||
| 	struct scatterlist *sg; | ||||
| 	int n; | ||||
| 
 | ||||
| 	BUG_ON(direction == PCI_DMA_NONE); | ||||
| 	if (direction != PCI_DMA_TODEVICE) { | ||||
| 		for (n = 0; n < nents; n++) { | ||||
| 		for_each_sg(sgl, sg, nents, n) { | ||||
| 			BUG_ON(page_address(sg->page) == NULL); | ||||
| 			mmu_inval_dma_area( | ||||
| 			    (unsigned long) page_address(sg->page), | ||||
| 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK); | ||||
| 			sg++; | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  |  | |||
|  | @ -11,8 +11,8 @@ | |||
| #include <linux/mm.h> | ||||
| #include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */ | ||||
| #include <linux/bitops.h> | ||||
| #include <linux/scatterlist.h> | ||||
| 
 | ||||
| #include <asm/scatterlist.h> | ||||
| #include <asm/pgalloc.h> | ||||
| #include <asm/pgtable.h> | ||||
| #include <asm/sbus.h> | ||||
|  | @ -144,8 +144,9 @@ static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus | |||
| 	spin_lock_irqsave(&iounit->lock, flags); | ||||
| 	while (sz != 0) { | ||||
| 		--sz; | ||||
| 		sg[sz].dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg[sz].page) + sg[sz].offset, sg[sz].length); | ||||
| 		sg[sz].dvma_length = sg[sz].length; | ||||
| 		sg->dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg->page) + sg->offset, sg->length); | ||||
| 		sg->dvma_length = sg->length; | ||||
| 		sg = sg_next(sg); | ||||
| 	} | ||||
| 	spin_unlock_irqrestore(&iounit->lock, flags); | ||||
| } | ||||
|  | @ -173,11 +174,12 @@ static void iounit_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_ | |||
| 	spin_lock_irqsave(&iounit->lock, flags); | ||||
| 	while (sz != 0) { | ||||
| 		--sz; | ||||
| 		len = ((sg[sz].dvma_address & ~PAGE_MASK) + sg[sz].length + (PAGE_SIZE-1)) >> PAGE_SHIFT; | ||||
| 		vaddr = (sg[sz].dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT; | ||||
| 		len = ((sg->dvma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT; | ||||
| 		vaddr = (sg->dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT; | ||||
| 		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr)); | ||||
| 		for (len += vaddr; vaddr < len; vaddr++) | ||||
| 			clear_bit(vaddr, iounit->bmap); | ||||
| 		sg = sg_next(sg); | ||||
| 	} | ||||
| 	spin_unlock_irqrestore(&iounit->lock, flags); | ||||
| } | ||||
|  |  | |||
|  | @ -12,8 +12,8 @@ | |||
| #include <linux/mm.h> | ||||
| #include <linux/slab.h> | ||||
| #include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */ | ||||
| #include <linux/scatterlist.h> | ||||
| 
 | ||||
| #include <asm/scatterlist.h> | ||||
| #include <asm/pgalloc.h> | ||||
| #include <asm/pgtable.h> | ||||
| #include <asm/sbus.h> | ||||
|  | @ -240,7 +240,7 @@ static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sb | |||
| 		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; | ||||
| 		sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset; | ||||
| 		sg->dvma_length = (__u32) sg->length; | ||||
| 		sg++; | ||||
| 		sg = sg_next(sg); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
|  | @ -254,7 +254,7 @@ static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbu | |||
| 		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; | ||||
| 		sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset; | ||||
| 		sg->dvma_length = (__u32) sg->length; | ||||
| 		sg++; | ||||
| 		sg = sg_next(sg); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
|  | @ -285,7 +285,7 @@ static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbu | |||
| 
 | ||||
| 		sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset; | ||||
| 		sg->dvma_length = (__u32) sg->length; | ||||
| 		sg++; | ||||
| 		sg = sg_next(sg); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
|  | @ -325,7 +325,7 @@ static void iommu_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_b | |||
| 		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; | ||||
| 		iommu_release_one(sg->dvma_address & PAGE_MASK, n, sbus); | ||||
| 		sg->dvma_address = 0x21212121; | ||||
| 		sg++; | ||||
| 		sg = sg_next(sg); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
|  |  | |||
|  | @ -17,8 +17,8 @@ | |||
| #include <linux/highmem.h> | ||||
| #include <linux/fs.h> | ||||
| #include <linux/seq_file.h> | ||||
| #include <linux/scatterlist.h> | ||||
| 
 | ||||
| #include <asm/scatterlist.h> | ||||
| #include <asm/page.h> | ||||
| #include <asm/pgalloc.h> | ||||
| #include <asm/pgtable.h> | ||||
|  | @ -1228,8 +1228,9 @@ static void sun4c_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus * | |||
| { | ||||
| 	while (sz != 0) { | ||||
| 		--sz; | ||||
| 		sg[sz].dvma_address = (__u32)sun4c_lockarea(page_address(sg[sz].page) + sg[sz].offset, sg[sz].length); | ||||
| 		sg[sz].dvma_length = sg[sz].length; | ||||
| 		sg->dvma_address = (__u32)sun4c_lockarea(page_address(sg->page) + sg->offset, sg->length); | ||||
| 		sg->dvma_length = sg->length; | ||||
| 		sg = sg_next(sg); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
|  | @ -1244,7 +1245,8 @@ static void sun4c_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_b | |||
| { | ||||
| 	while (sz != 0) { | ||||
| 		--sz; | ||||
| 		sun4c_unlockarea((char *)sg[sz].dvma_address, sg[sz].length); | ||||
| 		sun4c_unlockarea((char *)sg->dvma_address, sg->length); | ||||
| 		sg = sg_next(sg); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
|  |  | |||
|  | @ -10,6 +10,7 @@ | |||
| #include <linux/device.h> | ||||
| #include <linux/dma-mapping.h> | ||||
| #include <linux/errno.h> | ||||
| #include <linux/scatterlist.h> | ||||
| 
 | ||||
| #ifdef CONFIG_PCI | ||||
| #include <linux/pci.h> | ||||
|  | @ -480,7 +481,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, | |||
| 			   unsigned long iopte_protection) | ||||
| { | ||||
| 	struct scatterlist *dma_sg = sg; | ||||
| 	struct scatterlist *sg_end = sg + nelems; | ||||
| 	struct scatterlist *sg_end = sg_last(sg, nelems); | ||||
| 	int i; | ||||
| 
 | ||||
| 	for (i = 0; i < nused; i++) { | ||||
|  | @ -515,7 +516,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, | |||
| 					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL))); | ||||
| 					break; | ||||
| 				} | ||||
| 				sg++; | ||||
| 				sg = sg_next(sg); | ||||
| 			} | ||||
| 
 | ||||
| 			pteval = iopte_protection | (pteval & IOPTE_PAGE); | ||||
|  | @ -528,24 +529,24 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, | |||
| 			} | ||||
| 
 | ||||
| 			pteval = (pteval & IOPTE_PAGE) + len; | ||||
| 			sg++; | ||||
| 			sg = sg_next(sg); | ||||
| 
 | ||||
| 			/* Skip over any tail mappings we've fully mapped,
 | ||||
| 			 * adjusting pteval along the way.  Stop when we | ||||
| 			 * detect a page crossing event. | ||||
| 			 */ | ||||
| 			while (sg < sg_end && | ||||
| 			while (sg != sg_end && | ||||
| 			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL && | ||||
| 			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) && | ||||
| 			       ((pteval ^ | ||||
| 				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) { | ||||
| 				pteval += sg->length; | ||||
| 				sg++; | ||||
| 				sg = sg_next(sg); | ||||
| 			} | ||||
| 			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL) | ||||
| 				pteval = ~0UL; | ||||
| 		} while (dma_npages != 0); | ||||
| 		dma_sg++; | ||||
| 		dma_sg = sg_next(dma_sg); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
|  | @ -606,7 +607,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, | |||
| 	sgtmp = sglist; | ||||
| 	while (used && sgtmp->dma_length) { | ||||
| 		sgtmp->dma_address += dma_base; | ||||
| 		sgtmp++; | ||||
| 		sgtmp = sg_next(sgtmp); | ||||
| 		used--; | ||||
| 	} | ||||
| 	used = nelems - used; | ||||
|  | @ -642,6 +643,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
| 	struct strbuf *strbuf; | ||||
| 	iopte_t *base; | ||||
| 	unsigned long flags, ctx, i, npages; | ||||
| 	struct scatterlist *sg, *sgprv; | ||||
| 	u32 bus_addr; | ||||
| 
 | ||||
| 	if (unlikely(direction == DMA_NONE)) { | ||||
|  | @ -654,11 +656,14 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
| 
 | ||||
| 	bus_addr = sglist->dma_address & IO_PAGE_MASK; | ||||
| 
 | ||||
| 	for (i = 1; i < nelems; i++) | ||||
| 		if (sglist[i].dma_length == 0) | ||||
| 	sgprv = NULL; | ||||
| 	for_each_sg(sglist, sg, nelems, i) { | ||||
| 		if (sg->dma_length == 0) | ||||
| 			break; | ||||
| 	i--; | ||||
| 	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - | ||||
| 		sgprv = sg; | ||||
| 	} | ||||
| 
 | ||||
| 	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) - | ||||
| 		  bus_addr) >> IO_PAGE_SHIFT; | ||||
| 
 | ||||
| 	base = iommu->page_table + | ||||
|  | @ -730,6 +735,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev, | |||
| 	struct iommu *iommu; | ||||
| 	struct strbuf *strbuf; | ||||
| 	unsigned long flags, ctx, npages, i; | ||||
| 	struct scatterlist *sg, *sgprv; | ||||
| 	u32 bus_addr; | ||||
| 
 | ||||
| 	iommu = dev->archdata.iommu; | ||||
|  | @ -753,11 +759,14 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev, | |||
| 
 | ||||
| 	/* Step 2: Kick data out of streaming buffers. */ | ||||
| 	bus_addr = sglist[0].dma_address & IO_PAGE_MASK; | ||||
| 	for(i = 1; i < nelems; i++) | ||||
| 		if (!sglist[i].dma_length) | ||||
| 	sgprv = NULL; | ||||
| 	for_each_sg(sglist, sg, nelems, i) { | ||||
| 		if (sg->dma_length == 0) | ||||
| 			break; | ||||
| 	i--; | ||||
| 	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) | ||||
| 		sgprv = sg; | ||||
| 	} | ||||
| 
 | ||||
| 	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) | ||||
| 		  - bus_addr) >> IO_PAGE_SHIFT; | ||||
| 	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); | ||||
| 
 | ||||
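sparc64's fill_sg() needs the final entry of the list, which used to be computed as sg + nelems - 1; that arithmetic is invalid across a chain boundary, hence the new sg_last() helper used above. A sketch consistent with the chained case of the helper this series introduces (the in-tree version can short-circuit to plain indexing on architectures without chain support):

	static inline struct scatterlist *sg_last(struct scatterlist *sgl,
						  unsigned int nents)
	{
		struct scatterlist *sg, *ret = NULL;
		unsigned int i;

		for_each_sg(sgl, sg, nents, i)
			ret = sg;	/* remember the most recent entry */

		return ret;
	}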
|  |  | |||
|  | @ -13,6 +13,7 @@ | |||
| #include <linux/irq.h> | ||||
| #include <linux/msi.h> | ||||
| #include <linux/log2.h> | ||||
| #include <linux/scatterlist.h> | ||||
| 
 | ||||
| #include <asm/iommu.h> | ||||
| #include <asm/irq.h> | ||||
|  | @ -373,7 +374,7 @@ static inline long fill_sg(long entry, struct device *dev, | |||
| 			   int nused, int nelems, unsigned long prot) | ||||
| { | ||||
| 	struct scatterlist *dma_sg = sg; | ||||
| 	struct scatterlist *sg_end = sg + nelems; | ||||
| 	struct scatterlist *sg_end = sg_last(sg, nelems); | ||||
| 	unsigned long flags; | ||||
| 	int i; | ||||
| 
 | ||||
|  | @ -413,7 +414,7 @@ static inline long fill_sg(long entry, struct device *dev, | |||
| 					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL))); | ||||
| 					break; | ||||
| 				} | ||||
| 				sg++; | ||||
| 				sg = sg_next(sg); | ||||
| 			} | ||||
| 
 | ||||
| 			pteval = (pteval & IOPTE_PAGE); | ||||
|  | @ -431,24 +432,25 @@ static inline long fill_sg(long entry, struct device *dev, | |||
| 			} | ||||
| 
 | ||||
| 			pteval = (pteval & IOPTE_PAGE) + len; | ||||
| 			sg++; | ||||
| 			sg = sg_next(sg); | ||||
| 
 | ||||
| 			/* Skip over any tail mappings we've fully mapped,
 | ||||
| 			 * adjusting pteval along the way.  Stop when we | ||||
| 			 * detect a page crossing event. | ||||
| 			 */ | ||||
| 			while (sg < sg_end && | ||||
| 			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL && | ||||
| 			while ((pteval << (64 - IO_PAGE_SHIFT)) != 0UL && | ||||
| 			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) && | ||||
| 			       ((pteval ^ | ||||
| 				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) { | ||||
| 				pteval += sg->length; | ||||
| 				sg++; | ||||
| 				if (sg == sg_end) | ||||
| 					break; | ||||
| 				sg = sg_next(sg); | ||||
| 			} | ||||
| 			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL) | ||||
| 				pteval = ~0UL; | ||||
| 		} while (dma_npages != 0); | ||||
| 		dma_sg++; | ||||
| 		dma_sg = sg_next(dma_sg); | ||||
| 	} | ||||
| 
 | ||||
| 	if (unlikely(iommu_batch_end() < 0L)) | ||||
|  | @ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, | |||
| 	sgtmp = sglist; | ||||
| 	while (used && sgtmp->dma_length) { | ||||
| 		sgtmp->dma_address += dma_base; | ||||
| 		sgtmp++; | ||||
| 		sgtmp = sg_next(sgtmp); | ||||
| 		used--; | ||||
| 	} | ||||
| 	used = nelems - used; | ||||
|  | @ -545,6 +547,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
| 	struct pci_pbm_info *pbm; | ||||
| 	struct iommu *iommu; | ||||
| 	unsigned long flags, i, npages; | ||||
| 	struct scatterlist *sg, *sgprv; | ||||
| 	long entry; | ||||
| 	u32 devhandle, bus_addr; | ||||
| 
 | ||||
|  | @ -558,12 +561,15 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
| 	devhandle = pbm->devhandle; | ||||
| 	 | ||||
| 	bus_addr = sglist->dma_address & IO_PAGE_MASK; | ||||
| 
 | ||||
| 	for (i = 1; i < nelems; i++) | ||||
| 		if (sglist[i].dma_length == 0) | ||||
| 	sgprv = NULL; | ||||
| 	for_each_sg(sglist, sg, nelems, i) { | ||||
| 		if (sg->dma_length == 0) | ||||
| 			break; | ||||
| 	i--; | ||||
| 	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - | ||||
| 
 | ||||
| 		sgprv = sg; | ||||
| 	} | ||||
| 
 | ||||
| 	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) - | ||||
| 		  bus_addr) >> IO_PAGE_SHIFT; | ||||
| 
 | ||||
| 	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT); | ||||
|  |  | |||
|  | @ -35,6 +35,7 @@ | |||
| #include <linux/pci_ids.h> | ||||
| #include <linux/pci.h> | ||||
| #include <linux/delay.h> | ||||
| #include <linux/scatterlist.h> | ||||
| #include <asm/iommu.h> | ||||
| #include <asm/calgary.h> | ||||
| #include <asm/tce.h> | ||||
|  | @ -384,31 +385,32 @@ static void calgary_unmap_sg(struct device *dev, | |||
| 	struct scatterlist *sglist, int nelems, int direction) | ||||
| { | ||||
| 	struct iommu_table *tbl = find_iommu_table(dev); | ||||
| 	struct scatterlist *s; | ||||
| 	int i; | ||||
| 
 | ||||
| 	if (!translate_phb(to_pci_dev(dev))) | ||||
| 		return; | ||||
| 
 | ||||
| 	while (nelems--) { | ||||
| 	for_each_sg(sglist, s, nelems, i) { | ||||
| 		unsigned int npages; | ||||
| 		dma_addr_t dma = sglist->dma_address; | ||||
| 		unsigned int dmalen = sglist->dma_length; | ||||
| 		dma_addr_t dma = s->dma_address; | ||||
| 		unsigned int dmalen = s->dma_length; | ||||
| 
 | ||||
| 		if (dmalen == 0) | ||||
| 			break; | ||||
| 
 | ||||
| 		npages = num_dma_pages(dma, dmalen); | ||||
| 		iommu_free(tbl, dma, npages); | ||||
| 		sglist++; | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static int calgary_nontranslate_map_sg(struct device* dev, | ||||
| 	struct scatterlist *sg, int nelems, int direction) | ||||
| { | ||||
| 	struct scatterlist *s; | ||||
| 	int i; | ||||
| 
 | ||||
| 	for (i = 0; i < nelems; i++ ) { | ||||
| 		struct scatterlist *s = &sg[i]; | ||||
| 	for_each_sg(sg, s, nelems, i) { | ||||
| 		BUG_ON(!s->page); | ||||
| 		s->dma_address = virt_to_bus(page_address(s->page) +s->offset); | ||||
| 		s->dma_length = s->length; | ||||
|  | @ -420,6 +422,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, | |||
| 	int nelems, int direction) | ||||
| { | ||||
| 	struct iommu_table *tbl = find_iommu_table(dev); | ||||
| 	struct scatterlist *s; | ||||
| 	unsigned long vaddr; | ||||
| 	unsigned int npages; | ||||
| 	unsigned long entry; | ||||
|  | @ -428,8 +431,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, | |||
| 	if (!translate_phb(to_pci_dev(dev))) | ||||
| 		return calgary_nontranslate_map_sg(dev, sg, nelems, direction); | ||||
| 
 | ||||
| 	for (i = 0; i < nelems; i++ ) { | ||||
| 		struct scatterlist *s = &sg[i]; | ||||
| 	for_each_sg(sg, s, nelems, i) { | ||||
| 		BUG_ON(!s->page); | ||||
| 
 | ||||
| 		vaddr = (unsigned long)page_address(s->page) + s->offset; | ||||
|  | @ -454,9 +456,9 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, | |||
| 	return nelems; | ||||
| error: | ||||
| 	calgary_unmap_sg(dev, sg, nelems, direction); | ||||
| 	for (i = 0; i < nelems; i++) { | ||||
| 		sg[i].dma_address = bad_dma_address; | ||||
| 		sg[i].dma_length = 0; | ||||
| 	for_each_sg(sg, s, nelems, i) { | ||||
| 		sg->dma_address = bad_dma_address; | ||||
| 		sg->dma_length = 0; | ||||
| 	} | ||||
| 	return 0; | ||||
| } | ||||
|  |  | |||
|  | @ -23,6 +23,7 @@ | |||
| #include <linux/interrupt.h> | ||||
| #include <linux/bitops.h> | ||||
| #include <linux/kdebug.h> | ||||
| #include <linux/scatterlist.h> | ||||
| #include <asm/atomic.h> | ||||
| #include <asm/io.h> | ||||
| #include <asm/mtrr.h> | ||||
|  | @ -278,10 +279,10 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr, | |||
|  */ | ||||
| static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) | ||||
| { | ||||
| 	struct scatterlist *s; | ||||
| 	int i; | ||||
| 
 | ||||
| 	for (i = 0; i < nents; i++) { | ||||
| 		struct scatterlist *s = &sg[i]; | ||||
| 	for_each_sg(sg, s, nents, i) { | ||||
| 		if (!s->dma_length || !s->length) | ||||
| 			break; | ||||
| 		gart_unmap_single(dev, s->dma_address, s->dma_length, dir); | ||||
|  | @ -292,14 +293,14 @@ static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
| static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg, | ||||
| 			       int nents, int dir) | ||||
| { | ||||
| 	struct scatterlist *s; | ||||
| 	int i; | ||||
| 
 | ||||
| #ifdef CONFIG_IOMMU_DEBUG | ||||
| 	printk(KERN_DEBUG "dma_map_sg overflow\n"); | ||||
| #endif | ||||
| 
 | ||||
|  	for (i = 0; i < nents; i++ ) { | ||||
| 		struct scatterlist *s = &sg[i]; | ||||
| 	for_each_sg(sg, s, nents, i) { | ||||
| 		unsigned long addr = page_to_phys(s->page) + s->offset;  | ||||
| 		if (nonforced_iommu(dev, addr, s->length)) {  | ||||
| 			addr = dma_map_area(dev, addr, s->length, dir); | ||||
|  | @ -319,23 +320,23 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg, | |||
| } | ||||
| 
 | ||||
| /* Map multiple scatterlist entries continuous into the first. */ | ||||
| static int __dma_map_cont(struct scatterlist *sg, int start, int stopat, | ||||
| static int __dma_map_cont(struct scatterlist *start, int nelems, | ||||
| 		      struct scatterlist *sout, unsigned long pages) | ||||
| { | ||||
| 	unsigned long iommu_start = alloc_iommu(pages); | ||||
| 	unsigned long iommu_page = iommu_start;  | ||||
| 	struct scatterlist *s; | ||||
| 	int i; | ||||
| 
 | ||||
| 	if (iommu_start == -1) | ||||
| 		return -1; | ||||
| 	 | ||||
| 	for (i = start; i < stopat; i++) { | ||||
| 		struct scatterlist *s = &sg[i]; | ||||
| 
 | ||||
| 	for_each_sg(start, s, nelems, i) { | ||||
| 		unsigned long pages, addr; | ||||
| 		unsigned long phys_addr = s->dma_address; | ||||
| 		 | ||||
| 		BUG_ON(i > start && s->offset); | ||||
| 		if (i == start) { | ||||
| 		BUG_ON(s != start && s->offset); | ||||
| 		if (s == start) { | ||||
| 			*sout = *s;  | ||||
| 			sout->dma_address = iommu_bus_base; | ||||
| 			sout->dma_address += iommu_page*PAGE_SIZE + s->offset; | ||||
|  | @ -357,17 +358,17 @@ static int __dma_map_cont(struct scatterlist *sg, int start, int stopat, | |||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| static inline int dma_map_cont(struct scatterlist *sg, int start, int stopat, | ||||
| static inline int dma_map_cont(struct scatterlist *start, int nelems, | ||||
| 		      struct scatterlist *sout, | ||||
| 		      unsigned long pages, int need) | ||||
| { | ||||
| 	if (!need) {  | ||||
| 		BUG_ON(stopat - start != 1); | ||||
| 		*sout = sg[start];  | ||||
| 		sout->dma_length = sg[start].length;  | ||||
| 	if (!need) { | ||||
| 		BUG_ON(nelems != 1); | ||||
| 		*sout = *start; | ||||
| 		sout->dma_length = start->length; | ||||
| 		return 0; | ||||
| 	}  | ||||
| 	return __dma_map_cont(sg, start, stopat, sout, pages); | ||||
| 	} | ||||
| 	return __dma_map_cont(start, nelems, sout, pages); | ||||
| } | ||||
| 		 | ||||
| /*
 | ||||
|  | @ -381,6 +382,7 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) | |||
| 	int start; | ||||
| 	unsigned long pages = 0; | ||||
| 	int need = 0, nextneed; | ||||
| 	struct scatterlist *s, *ps, *start_sg, *sgmap; | ||||
| 
 | ||||
| 	if (nents == 0)  | ||||
| 		return 0; | ||||
|  | @ -390,8 +392,9 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) | |||
| 
 | ||||
| 	out = 0; | ||||
| 	start = 0; | ||||
| 	for (i = 0; i < nents; i++) { | ||||
| 		struct scatterlist *s = &sg[i]; | ||||
| 	start_sg = sgmap = sg; | ||||
| 	ps = NULL; /* shut up gcc */ | ||||
| 	for_each_sg(sg, s, nents, i) { | ||||
| 		dma_addr_t addr = page_to_phys(s->page) + s->offset; | ||||
| 		s->dma_address = addr; | ||||
| 		BUG_ON(s->length == 0);  | ||||
|  | @ -400,29 +403,33 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) | |||
| 
 | ||||
| 		/* Handle the previous not yet processed entries */ | ||||
| 		if (i > start) { | ||||
| 			struct scatterlist *ps = &sg[i-1]; | ||||
| 			/* Can only merge when the last chunk ends on a page 
 | ||||
| 			   boundary and the new one doesn't have an offset. */ | ||||
| 			if (!iommu_merge || !nextneed || !need || s->offset || | ||||
| 			    (ps->offset + ps->length) % PAGE_SIZE) {  | ||||
| 				if (dma_map_cont(sg, start, i, sg+out, pages, | ||||
| 						 need) < 0) | ||||
| 			    (ps->offset + ps->length) % PAGE_SIZE) { | ||||
| 				if (dma_map_cont(start_sg, i - start, sgmap, | ||||
| 						  pages, need) < 0) | ||||
| 					goto error; | ||||
| 				out++; | ||||
| 				sgmap = sg_next(sgmap); | ||||
| 				pages = 0; | ||||
| 				start = i;	 | ||||
| 				start = i; | ||||
| 				start_sg = s; | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		need = nextneed; | ||||
| 		pages += to_pages(s->offset, s->length); | ||||
| 		ps = s; | ||||
| 	} | ||||
| 	if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0) | ||||
| 	if (dma_map_cont(start_sg, i - start, sgmap, pages, need) < 0) | ||||
| 		goto error; | ||||
| 	out++; | ||||
| 	flush_gart(); | ||||
| 	if (out < nents)  | ||||
| 		sg[out].dma_length = 0;  | ||||
| 	if (out < nents) { | ||||
| 		sgmap = sg_next(sgmap); | ||||
| 		sgmap->dma_length = 0; | ||||
| 	} | ||||
| 	return out; | ||||
| 
 | ||||
| error: | ||||
|  | @ -437,8 +444,8 @@ error: | |||
| 	if (panic_on_overflow) | ||||
| 		panic("dma_map_sg: overflow on %lu pages\n", pages); | ||||
| 	iommu_full(dev, pages << PAGE_SHIFT, dir); | ||||
| 	for (i = 0; i < nents; i++) | ||||
| 		sg[i].dma_address = bad_dma_address; | ||||
| 	for_each_sg(sg, s, nents, i) | ||||
| 		s->dma_address = bad_dma_address; | ||||
| 	return 0; | ||||
| }  | ||||
| 
 | ||||
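The GART rework is the same conversion applied to index-heavy code: dma_map_cont() now takes an explicit start pointer plus a count instead of (array, start index, stop index), and the output slot is a dedicated sgmap cursor advanced with sg_next() rather than computed as sg + out. The overall shape, reduced to a sketch (segment_boundary() and emit() are hypothetical stand-ins for the merge test and the dma_map_cont() call):

	static void walk_with_cursors(struct scatterlist *sgl, int nents)
	{
		struct scatterlist *s, *start_sg = sgl;
		int i, start = 0;

		for_each_sg(sgl, s, nents, i) {
			if (i > start && segment_boundary(s)) {
				emit(start_sg, i - start);	/* flush previous run */
				start_sg = s;
				start = i;
			}
		}
		emit(start_sg, nents - start);		/* final run */
	}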
|  |  | |||
|  | @ -5,6 +5,7 @@ | |||
| #include <linux/pci.h> | ||||
| #include <linux/string.h> | ||||
| #include <linux/dma-mapping.h> | ||||
| #include <linux/scatterlist.h> | ||||
| 
 | ||||
| #include <asm/iommu.h> | ||||
| #include <asm/processor.h> | ||||
|  | @ -57,10 +58,10 @@ static void nommu_unmap_single(struct device *dev, dma_addr_t addr,size_t size, | |||
| static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, | ||||
| 	       int nents, int direction) | ||||
| { | ||||
| 	struct scatterlist *s; | ||||
| 	int i; | ||||
| 
 | ||||
|  	for (i = 0; i < nents; i++ ) { | ||||
| 		struct scatterlist *s = &sg[i]; | ||||
| 	for_each_sg(sg, s, nents, i) { | ||||
| 		BUG_ON(!s->page); | ||||
| 		s->dma_address = virt_to_bus(page_address(s->page) +s->offset); | ||||
| 		if (!check_addr("map_sg", hwdev, s->dma_address, s->length)) | ||||
|  |  | |||
|  | @ -30,6 +30,7 @@ | |||
| #include <linux/cpu.h> | ||||
| #include <linux/blktrace_api.h> | ||||
| #include <linux/fault-inject.h> | ||||
| #include <linux/scatterlist.h> | ||||
| 
 | ||||
| /*
 | ||||
|  * for max sense size | ||||
|  | @ -1318,9 +1319,10 @@ static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio, | |||
|  * must make sure sg can hold rq->nr_phys_segments entries | ||||
|  */ | ||||
| int blk_rq_map_sg(struct request_queue *q, struct request *rq, | ||||
| 		  struct scatterlist *sg) | ||||
| 		  struct scatterlist *sglist) | ||||
| { | ||||
| 	struct bio_vec *bvec, *bvprv; | ||||
| 	struct scatterlist *next_sg, *sg; | ||||
| 	struct req_iterator iter; | ||||
| 	int nsegs, cluster; | ||||
| 
 | ||||
|  | @ -1331,11 +1333,12 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, | |||
| 	 * for each bio in rq | ||||
| 	 */ | ||||
| 	bvprv = NULL; | ||||
| 	sg = next_sg = &sglist[0]; | ||||
| 	rq_for_each_segment(bvec, rq, iter) { | ||||
| 		int nbytes = bvec->bv_len; | ||||
| 
 | ||||
| 		if (bvprv && cluster) { | ||||
| 			if (sg[nsegs - 1].length + nbytes > q->max_segment_size) | ||||
| 			if (sg->length + nbytes > q->max_segment_size) | ||||
| 				goto new_segment; | ||||
| 
 | ||||
| 			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) | ||||
|  | @ -1343,14 +1346,15 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, | |||
| 			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) | ||||
| 				goto new_segment; | ||||
| 
 | ||||
| 			sg[nsegs - 1].length += nbytes; | ||||
| 			sg->length += nbytes; | ||||
| 		} else { | ||||
| new_segment: | ||||
| 			memset(&sg[nsegs],0,sizeof(struct scatterlist)); | ||||
| 			sg[nsegs].page = bvec->bv_page; | ||||
| 			sg[nsegs].length = nbytes; | ||||
| 			sg[nsegs].offset = bvec->bv_offset; | ||||
| 			sg = next_sg; | ||||
| 			next_sg = sg_next(sg); | ||||
| 
 | ||||
| 			sg->page = bvec->bv_page; | ||||
| 			sg->length = nbytes; | ||||
| 			sg->offset = bvec->bv_offset; | ||||
| 			nsegs++; | ||||
| 		} | ||||
| 		bvprv = bvec; | ||||
|  | @ -4068,7 +4072,23 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) | |||
| 	return queue_var_show(max_hw_sectors_kb, (page)); | ||||
| } | ||||
| 
 | ||||
| static ssize_t queue_max_segments_show(struct request_queue *q, char *page) | ||||
| { | ||||
| 	return queue_var_show(q->max_phys_segments, page); | ||||
| } | ||||
| 
 | ||||
| static ssize_t queue_max_segments_store(struct request_queue *q, | ||||
| 					const char *page, size_t count) | ||||
| { | ||||
| 	unsigned long segments; | ||||
| 	ssize_t ret = queue_var_store(&segments, page, count); | ||||
| 
 | ||||
| 	spin_lock_irq(q->queue_lock); | ||||
| 	q->max_phys_segments = segments; | ||||
| 	spin_unlock_irq(q->queue_lock); | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
| static struct queue_sysfs_entry queue_requests_entry = { | ||||
| 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR }, | ||||
| 	.show = queue_requests_show, | ||||
|  | @ -4092,6 +4112,12 @@ static struct queue_sysfs_entry queue_max_hw_sectors_entry = { | |||
| 	.show = queue_max_hw_sectors_show, | ||||
| }; | ||||
| 
 | ||||
| static struct queue_sysfs_entry queue_max_segments_entry = { | ||||
| 	.attr = {.name = "max_segments", .mode = S_IRUGO | S_IWUSR }, | ||||
| 	.show = queue_max_segments_show, | ||||
| 	.store = queue_max_segments_store, | ||||
| }; | ||||
| 
 | ||||
| static struct queue_sysfs_entry queue_iosched_entry = { | ||||
| 	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR }, | ||||
| 	.show = elv_iosched_show, | ||||
|  | @ -4103,6 +4129,7 @@ static struct attribute *default_attrs[] = { | |||
| 	&queue_ra_entry.attr, | ||||
| 	&queue_max_hw_sectors_entry.attr, | ||||
| 	&queue_max_sectors_entry.attr, | ||||
| 	&queue_max_segments_entry.attr, | ||||
| 	&queue_iosched_entry.attr, | ||||
| 	NULL, | ||||
| }; | ||||
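Two caller-visible details fall out of the blk_rq_map_sg() change: the new_segment path no longer memsets each entry it fills, so callers must hand in a zeroed table (the cciss hunk below adds exactly that memset), and the queue now exposes a writable max_segments sysfs attribute backed by q->max_phys_segments. Consumer-side sketch (q, rq, dir, my_dev, tmp_sg and program_prd() are hypothetical; tmp_sg is a fixed-size table sized for rq->nr_phys_segments):

	struct scatterlist *sg;
	int i, nseg;

	memset(tmp_sg, 0, sizeof(tmp_sg));	/* table must start clean */
	nseg = blk_rq_map_sg(q, rq, tmp_sg);
	nseg = dma_map_sg(&my_dev->pdev->dev, tmp_sg, nseg, dir);

	for_each_sg(tmp_sg, sg, nseg, i)
		program_prd(my_dev, sg_dma_address(sg), sg_dma_len(sg));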
|  |  | |||
|  | @ -77,7 +77,7 @@ static int update2(struct hash_desc *desc, | |||
| 
 | ||||
| 		if (!nbytes) | ||||
| 			break; | ||||
| 		sg = sg_next(sg); | ||||
| 		sg = scatterwalk_sg_next(sg); | ||||
| 	} | ||||
| 
 | ||||
| 	return 0; | ||||
|  |  | |||
|  | @ -62,7 +62,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out, | |||
| 		walk->offset += PAGE_SIZE - 1; | ||||
| 		walk->offset &= PAGE_MASK; | ||||
| 		if (walk->offset >= walk->sg->offset + walk->sg->length) | ||||
| 			scatterwalk_start(walk, sg_next(walk->sg)); | ||||
| 			scatterwalk_start(walk, scatterwalk_sg_next(walk->sg)); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
|  |  | |||
|  | @ -20,7 +20,7 @@ | |||
| 
 | ||||
| #include "internal.h" | ||||
| 
 | ||||
| static inline struct scatterlist *sg_next(struct scatterlist *sg) | ||||
| static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg) | ||||
| { | ||||
| 	return (++sg)->length ? sg : (void *)sg->page; | ||||
| } | ||||
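The crypto layer is the one place that keeps the old walker: its lists chain through a zero-length entry whose page field, cast to void *, is the pointer to the next list, a convention that predates and conflicts with the generic sg_next(). Renaming it scatterwalk_sg_next() avoids the clash. How such a link is formed under that convention, as a sketch (crypto_sg_link() is a hypothetical name used for illustration only):

	static inline void crypto_sg_link(struct scatterlist *first, int num,
					  struct scatterlist *next)
	{
		first[num - 1].page   = (struct page *)next;	/* link pointer */
		first[num - 1].length = 0;	/* zero length marks the link */
	}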
|  |  | |||
|  | @ -1410,7 +1410,7 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc) | |||
|  */ | ||||
| unsigned ata_exec_internal_sg(struct ata_device *dev, | ||||
| 			      struct ata_taskfile *tf, const u8 *cdb, | ||||
| 			      int dma_dir, struct scatterlist *sg, | ||||
| 			      int dma_dir, struct scatterlist *sgl, | ||||
| 			      unsigned int n_elem, unsigned long timeout) | ||||
| { | ||||
| 	struct ata_link *link = dev->link; | ||||
|  | @ -1472,11 +1472,12 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, | |||
| 	qc->dma_dir = dma_dir; | ||||
| 	if (dma_dir != DMA_NONE) { | ||||
| 		unsigned int i, buflen = 0; | ||||
| 		struct scatterlist *sg; | ||||
| 
 | ||||
| 		for (i = 0; i < n_elem; i++) | ||||
| 			buflen += sg[i].length; | ||||
| 		for_each_sg(sgl, sg, n_elem, i) | ||||
| 			buflen += sg->length; | ||||
| 
 | ||||
| 		ata_sg_init(qc, sg, n_elem); | ||||
| 		ata_sg_init(qc, sgl, n_elem); | ||||
| 		qc->nbytes = buflen; | ||||
| 	} | ||||
| 
 | ||||
|  | @ -4292,7 +4293,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc) | |||
| 		if (qc->n_elem) | ||||
| 			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir); | ||||
| 		/* restore last sg */ | ||||
| 		sg[qc->orig_n_elem - 1].length += qc->pad_len; | ||||
| 		sg_last(sg, qc->orig_n_elem)->length += qc->pad_len; | ||||
| 		if (pad_buf) { | ||||
| 			struct scatterlist *psg = &qc->pad_sgent; | ||||
| 			void *addr = kmap_atomic(psg->page, KM_IRQ0); | ||||
|  | @ -4547,6 +4548,7 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen) | |||
| 	qc->orig_n_elem = 1; | ||||
| 	qc->buf_virt = buf; | ||||
| 	qc->nbytes = buflen; | ||||
| 	qc->cursg = qc->__sg; | ||||
| 
 | ||||
| 	sg_init_one(&qc->sgent, buf, buflen); | ||||
| } | ||||
|  | @ -4572,6 +4574,7 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, | |||
| 	qc->__sg = sg; | ||||
| 	qc->n_elem = n_elem; | ||||
| 	qc->orig_n_elem = n_elem; | ||||
| 	qc->cursg = qc->__sg; | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  | @ -4661,7 +4664,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc) | |||
| { | ||||
| 	struct ata_port *ap = qc->ap; | ||||
| 	struct scatterlist *sg = qc->__sg; | ||||
| 	struct scatterlist *lsg = &sg[qc->n_elem - 1]; | ||||
| 	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem); | ||||
| 	int n_elem, pre_n_elem, dir, trim_sg = 0; | ||||
| 
 | ||||
| 	VPRINTK("ENTER, ata%u\n", ap->print_id); | ||||
|  | @ -4825,7 +4828,6 @@ void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf, | |||
| static void ata_pio_sector(struct ata_queued_cmd *qc) | ||||
| { | ||||
| 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); | ||||
| 	struct scatterlist *sg = qc->__sg; | ||||
| 	struct ata_port *ap = qc->ap; | ||||
| 	struct page *page; | ||||
| 	unsigned int offset; | ||||
|  | @ -4834,8 +4836,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) | |||
| 	if (qc->curbytes == qc->nbytes - qc->sect_size) | ||||
| 		ap->hsm_task_state = HSM_ST_LAST; | ||||
| 
 | ||||
| 	page = sg[qc->cursg].page; | ||||
| 	offset = sg[qc->cursg].offset + qc->cursg_ofs; | ||||
| 	page = qc->cursg->page; | ||||
| 	offset = qc->cursg->offset + qc->cursg_ofs; | ||||
| 
 | ||||
| 	/* get the current page and offset */ | ||||
| 	page = nth_page(page, (offset >> PAGE_SHIFT)); | ||||
|  | @ -4863,8 +4865,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) | |||
| 	qc->curbytes += qc->sect_size; | ||||
| 	qc->cursg_ofs += qc->sect_size; | ||||
| 
 | ||||
| 	if (qc->cursg_ofs == (&sg[qc->cursg])->length) { | ||||
| 		qc->cursg++; | ||||
| 	if (qc->cursg_ofs == qc->cursg->length) { | ||||
| 		qc->cursg = sg_next(qc->cursg); | ||||
| 		qc->cursg_ofs = 0; | ||||
| 	} | ||||
| } | ||||
|  | @ -4950,16 +4952,18 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) | |||
| { | ||||
| 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); | ||||
| 	struct scatterlist *sg = qc->__sg; | ||||
| 	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem); | ||||
| 	struct ata_port *ap = qc->ap; | ||||
| 	struct page *page; | ||||
| 	unsigned char *buf; | ||||
| 	unsigned int offset, count; | ||||
| 	int no_more_sg = 0; | ||||
| 
 | ||||
| 	if (qc->curbytes + bytes >= qc->nbytes) | ||||
| 		ap->hsm_task_state = HSM_ST_LAST; | ||||
| 
 | ||||
| next_sg: | ||||
| 	if (unlikely(qc->cursg >= qc->n_elem)) { | ||||
| 	if (unlikely(no_more_sg)) { | ||||
| 		/*
 | ||||
| 		 * The end of qc->sg is reached and the device expects | ||||
| 		 * more data to transfer. In order not to overrun qc->sg | ||||
|  | @ -4982,7 +4986,7 @@ next_sg: | |||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	sg = &qc->__sg[qc->cursg]; | ||||
| 	sg = qc->cursg; | ||||
| 
 | ||||
| 	page = sg->page; | ||||
| 	offset = sg->offset + qc->cursg_ofs; | ||||
|  | @ -5021,7 +5025,10 @@ next_sg: | |||
| 	qc->cursg_ofs += count; | ||||
| 
 | ||||
| 	if (qc->cursg_ofs == sg->length) { | ||||
| 		qc->cursg++; | ||||
| 		if (qc->cursg == lsg) | ||||
| 			no_more_sg = 1; | ||||
| 
 | ||||
| 		qc->cursg = sg_next(qc->cursg); | ||||
| 		qc->cursg_ofs = 0; | ||||
| 	} | ||||
| 
 | ||||
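libata's PIO state machine used qc->cursg as an integer index (sg[qc->cursg]), precisely the pattern chaining forbids. It becomes a scatterlist pointer, initialized next to __sg in ata_sg_init() and ata_sg_init_one(), and the old bound check qc->cursg >= qc->n_elem is replaced by comparing against sg_last() via the no_more_sg flag. Condensed shape of the per-sector advance after the change:

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);	/* was: qc->cursg++ */
		qc->cursg_ofs = 0;
	}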
|  |  | |||
|  | @ -801,8 +801,6 @@ int ata_scsi_slave_config(struct scsi_device *sdev) | |||
| 
 | ||||
| 	ata_scsi_sdev_config(sdev); | ||||
| 
 | ||||
| 	blk_queue_max_phys_segments(sdev->request_queue, LIBATA_MAX_PRD); | ||||
| 
 | ||||
| 	sdev->manage_start_stop = 1; | ||||
| 
 | ||||
| 	if (dev) | ||||
|  |  | |||
|  | @ -2569,6 +2569,7 @@ static void do_cciss_request(struct request_queue *q) | |||
| 	       (int)creq->nr_sectors); | ||||
| #endif				/* CCISS_DEBUG */ | ||||
| 
 | ||||
| 	memset(tmp_sg, 0, sizeof(tmp_sg)); | ||||
| 	seg = blk_rq_map_sg(q, creq, tmp_sg); | ||||
| 
 | ||||
| 	/* get the DMA records for the setup */ | ||||
|  |  | |||
|  | @ -939,7 +939,8 @@ static int cris_ide_build_dmatable (ide_drive_t *drive) | |||
| 		/* group sequential buffers into one large buffer */ | ||||
| 		addr = page_to_phys(sg->page) + sg->offset; | ||||
| 		size = sg_dma_len(sg); | ||||
| 		while (sg++, --i) { | ||||
| 		while (--i) { | ||||
| 			sg = sg_next(sg); | ||||
| 			if ((addr + size) != page_to_phys(sg->page) + sg->offset) | ||||
| 				break; | ||||
| 			size += sg_dma_len(sg); | ||||
|  |  | |||
|  | @ -280,7 +280,7 @@ int ide_build_dmatable (ide_drive_t *drive, struct request *rq) | |||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		sg++; | ||||
| 		sg = sg_next(sg); | ||||
| 		i--; | ||||
| 	} | ||||
| 
 | ||||
|  |  | |||
|  | @ -846,7 +846,8 @@ void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq) | |||
| 	ide_hwif_t *hwif = drive->hwif; | ||||
| 
 | ||||
| 	hwif->nsect = hwif->nleft = rq->nr_sectors; | ||||
| 	hwif->cursg = hwif->cursg_ofs = 0; | ||||
| 	hwif->cursg_ofs = 0; | ||||
| 	hwif->cursg = NULL; | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL_GPL(ide_init_sg_cmd); | ||||
|  |  | |||
|  | @ -1349,7 +1349,7 @@ static int hwif_init(ide_hwif_t *hwif) | |||
| 	if (!hwif->sg_max_nents) | ||||
| 		hwif->sg_max_nents = PRD_ENTRIES; | ||||
| 
 | ||||
| 	hwif->sg_table = kmalloc(sizeof(struct scatterlist)*hwif->sg_max_nents, | ||||
| 	hwif->sg_table = kzalloc(sizeof(struct scatterlist)*hwif->sg_max_nents, | ||||
| 				 GFP_KERNEL); | ||||
| 	if (!hwif->sg_table) { | ||||
| 		printk(KERN_ERR "%s: unable to allocate SG table.\n", hwif->name); | ||||
|  |  | |||
|  | @ -45,6 +45,7 @@ | |||
| #include <linux/hdreg.h> | ||||
| #include <linux/ide.h> | ||||
| #include <linux/bitops.h> | ||||
| #include <linux/scatterlist.h> | ||||
| 
 | ||||
| #include <asm/byteorder.h> | ||||
| #include <asm/irq.h> | ||||
|  | @ -263,6 +264,7 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write) | |||
| { | ||||
| 	ide_hwif_t *hwif = drive->hwif; | ||||
| 	struct scatterlist *sg = hwif->sg_table; | ||||
| 	struct scatterlist *cursg = hwif->cursg; | ||||
| 	struct page *page; | ||||
| #ifdef CONFIG_HIGHMEM | ||||
| 	unsigned long flags; | ||||
|  | @ -270,8 +272,14 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write) | |||
| 	unsigned int offset; | ||||
| 	u8 *buf; | ||||
| 
 | ||||
| 	page = sg[hwif->cursg].page; | ||||
| 	offset = sg[hwif->cursg].offset + hwif->cursg_ofs * SECTOR_SIZE; | ||||
| 	cursg = hwif->cursg; | ||||
| 	if (!cursg) { | ||||
| 		cursg = sg; | ||||
| 		hwif->cursg = sg; | ||||
| 	} | ||||
| 
 | ||||
| 	page = cursg->page; | ||||
| 	offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE; | ||||
| 
 | ||||
| 	/* get the current page and offset */ | ||||
| 	page = nth_page(page, (offset >> PAGE_SHIFT)); | ||||
|  | @ -285,8 +293,8 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write) | |||
| 	hwif->nleft--; | ||||
| 	hwif->cursg_ofs++; | ||||
| 
 | ||||
| 	if ((hwif->cursg_ofs * SECTOR_SIZE) == sg[hwif->cursg].length) { | ||||
| 		hwif->cursg++; | ||||
| 	if ((hwif->cursg_ofs * SECTOR_SIZE) == cursg->length) { | ||||
| 		hwif->cursg = sg_next(hwif->cursg); | ||||
| 		hwif->cursg_ofs = 0; | ||||
| 	} | ||||
| 
 | ||||
|  | @ -367,6 +375,8 @@ static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq, | |||
| 
 | ||||
| static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat) | ||||
| { | ||||
| 	HWIF(drive)->cursg = NULL; | ||||
| 
 | ||||
| 	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { | ||||
| 		ide_task_t *task = rq->special; | ||||
| 
 | ||||
|  |  | |||
|  | @ -296,7 +296,7 @@ static int auide_build_dmatable(ide_drive_t *drive) | |||
| 			cur_addr += tc; | ||||
| 			cur_len -= tc; | ||||
| 		} | ||||
| 		sg++; | ||||
| 		sg = sg_next(sg); | ||||
| 		i--; | ||||
| 	} | ||||
| 
 | ||||
|  |  | |||
|  | @ -29,6 +29,7 @@ | |||
| #include <linux/mm.h> | ||||
| #include <linux/ioport.h> | ||||
| #include <linux/blkdev.h> | ||||
| #include <linux/scatterlist.h> | ||||
| #include <linux/ioc4.h> | ||||
| #include <asm/io.h> | ||||
| 
 | ||||
|  | @ -537,7 +538,7 @@ sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq, int ddir) | |||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		sg++; | ||||
| 		sg = sg_next(sg); | ||||
| 		i--; | ||||
| 	} | ||||
| 
 | ||||
|  |  | |||
|  | @ -1539,7 +1539,7 @@ pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq) | |||
| 			cur_len -= tc; | ||||
| 			++table; | ||||
| 		} | ||||
| 		sg++; | ||||
| 		sg = sg_next(sg); | ||||
| 		i--; | ||||
| 	} | ||||
| 
 | ||||
|  |  | |||
|  | @ -30,6 +30,7 @@ | |||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/scatterlist.h> | ||||
| #include <rdma/ib_verbs.h> | ||||
| 
 | ||||
| #include "ipath_verbs.h" | ||||
|  | @ -96,17 +97,18 @@ static void ipath_dma_unmap_page(struct ib_device *dev, | |||
| 	BUG_ON(!valid_dma_direction(direction)); | ||||
| } | ||||
| 
 | ||||
| static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents, | ||||
| 			enum dma_data_direction direction) | ||||
| static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl, | ||||
| 			int nents, enum dma_data_direction direction) | ||||
| { | ||||
| 	struct scatterlist *sg; | ||||
| 	u64 addr; | ||||
| 	int i; | ||||
| 	int ret = nents; | ||||
| 
 | ||||
| 	BUG_ON(!valid_dma_direction(direction)); | ||||
| 
 | ||||
| 	for (i = 0; i < nents; i++) { | ||||
| 		addr = (u64) page_address(sg[i].page); | ||||
| 	for_each_sg(sgl, sg, nents, i) { | ||||
| 		addr = (u64) page_address(sg->page); | ||||
| 		/* TODO: handle highmem pages */ | ||||
| 		if (!addr) { | ||||
| 			ret = 0; | ||||
|  |  | |||
|  | @ -124,17 +124,19 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, | |||
| 
 | ||||
| 	if (cmd_dir == ISER_DIR_OUT) { | ||||
| 		/* copy the unaligned sg the buffer which is used for RDMA */ | ||||
| 		struct scatterlist *sg = (struct scatterlist *)data->buf; | ||||
| 		struct scatterlist *sgl = (struct scatterlist *)data->buf; | ||||
| 		struct scatterlist *sg; | ||||
| 		int i; | ||||
| 		char *p, *from; | ||||
| 
 | ||||
| 		for (p = mem, i = 0; i < data->size; i++) { | ||||
| 			from = kmap_atomic(sg[i].page, KM_USER0); | ||||
| 		p = mem; | ||||
| 		for_each_sg(sgl, sg, data->size, i) { | ||||
| 			from = kmap_atomic(sg->page, KM_USER0); | ||||
| 			memcpy(p, | ||||
| 			       from + sg[i].offset, | ||||
| 			       sg[i].length); | ||||
| 			       from + sg->offset, | ||||
| 			       sg->length); | ||||
| 			kunmap_atomic(from, KM_USER0); | ||||
| 			p += sg[i].length; | ||||
| 			p += sg->length; | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
|  | @ -176,7 +178,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, | |||
| 
 | ||||
| 	if (cmd_dir == ISER_DIR_IN) { | ||||
| 		char *mem; | ||||
| 		struct scatterlist *sg; | ||||
| 		struct scatterlist *sgl, *sg; | ||||
| 		unsigned char *p, *to; | ||||
| 		unsigned int sg_size; | ||||
| 		int i; | ||||
|  | @ -184,16 +186,17 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, | |||
| 		/* copy back read RDMA to unaligned sg */ | ||||
| 		mem	= mem_copy->copy_buf; | ||||
| 
 | ||||
| 		sg	= (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf; | ||||
| 		sgl	= (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf; | ||||
| 		sg_size = iser_ctask->data[ISER_DIR_IN].size; | ||||
| 
 | ||||
| 		for (p = mem, i = 0; i < sg_size; i++){ | ||||
| 			to = kmap_atomic(sg[i].page, KM_SOFTIRQ0); | ||||
| 			memcpy(to + sg[i].offset, | ||||
| 		p = mem; | ||||
| 		for_each_sg(sgl, sg, sg_size, i) { | ||||
| 			to = kmap_atomic(sg->page, KM_SOFTIRQ0); | ||||
| 			memcpy(to + sg->offset, | ||||
| 			       p, | ||||
| 			       sg[i].length); | ||||
| 			       sg->length); | ||||
| 			kunmap_atomic(to, KM_SOFTIRQ0); | ||||
| 			p += sg[i].length; | ||||
| 			p += sg->length; | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
|  | @ -224,7 +227,8 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data, | |||
| 			       struct iser_page_vec *page_vec, | ||||
| 			       struct ib_device *ibdev) | ||||
| { | ||||
| 	struct scatterlist *sg = (struct scatterlist *)data->buf; | ||||
| 	struct scatterlist *sgl = (struct scatterlist *)data->buf; | ||||
| 	struct scatterlist *sg; | ||||
| 	u64 first_addr, last_addr, page; | ||||
| 	int end_aligned; | ||||
| 	unsigned int cur_page = 0; | ||||
|  | @ -232,24 +236,25 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data, | |||
| 	int i; | ||||
| 
 | ||||
| 	/* compute the offset of first element */ | ||||
| 	page_vec->offset = (u64) sg[0].offset & ~MASK_4K; | ||||
| 	page_vec->offset = (u64) sgl[0].offset & ~MASK_4K; | ||||
| 
 | ||||
| 	for (i = 0; i < data->dma_nents; i++) { | ||||
| 		unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]); | ||||
| 	for_each_sg(sgl, sg, data->dma_nents, i) { | ||||
| 		unsigned int dma_len = ib_sg_dma_len(ibdev, sg); | ||||
| 
 | ||||
| 		total_sz += dma_len; | ||||
| 
 | ||||
| 		first_addr = ib_sg_dma_address(ibdev, &sg[i]); | ||||
| 		first_addr = ib_sg_dma_address(ibdev, sg); | ||||
| 		last_addr  = first_addr + dma_len; | ||||
| 
 | ||||
| 		end_aligned   = !(last_addr  & ~MASK_4K); | ||||
| 
 | ||||
| 		/* continue to collect page fragments till aligned or SG ends */ | ||||
| 		while (!end_aligned && (i + 1 < data->dma_nents)) { | ||||
| 			sg = sg_next(sg); | ||||
| 			i++; | ||||
| 			dma_len = ib_sg_dma_len(ibdev, &sg[i]); | ||||
| 			dma_len = ib_sg_dma_len(ibdev, sg); | ||||
| 			total_sz += dma_len; | ||||
| 			last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len; | ||||
| 			last_addr = ib_sg_dma_address(ibdev, sg) + dma_len; | ||||
| 			end_aligned = !(last_addr  & ~MASK_4K); | ||||
| 		} | ||||
| 
 | ||||
|  | @ -284,25 +289,26 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data, | |||
| static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data, | ||||
| 					      struct ib_device *ibdev) | ||||
| { | ||||
| 	struct scatterlist *sg; | ||||
| 	struct scatterlist *sgl, *sg; | ||||
| 	u64 end_addr, next_addr; | ||||
| 	int i, cnt; | ||||
| 	unsigned int ret_len = 0; | ||||
| 
 | ||||
| 	sg = (struct scatterlist *)data->buf; | ||||
| 	sgl = (struct scatterlist *)data->buf; | ||||
| 
 | ||||
| 	for (cnt = 0, i = 0; i < data->dma_nents; i++, cnt++) { | ||||
| 	cnt = 0; | ||||
| 	for_each_sg(sgl, sg, data->dma_nents, i) { | ||||
| 		/* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
 | ||||
| 		   "offset: %ld sz: %ld\n", i, | ||||
| 		   (unsigned long)page_to_phys(sg[i].page), | ||||
| 		   (unsigned long)sg[i].offset, | ||||
| 		   (unsigned long)sg[i].length); */ | ||||
| 		end_addr = ib_sg_dma_address(ibdev, &sg[i]) + | ||||
| 			   ib_sg_dma_len(ibdev, &sg[i]); | ||||
| 		   (unsigned long)page_to_phys(sg->page), | ||||
| 		   (unsigned long)sg->offset, | ||||
| 		   (unsigned long)sg->length); */ | ||||
| 		end_addr = ib_sg_dma_address(ibdev, sg) + | ||||
| 			   ib_sg_dma_len(ibdev, sg); | ||||
| 		/* iser_dbg("Checking sg iobuf end address "
 | ||||
| 		       "0x%08lX\n", end_addr); */ | ||||
| 		if (i + 1 < data->dma_nents) { | ||||
| 			next_addr = ib_sg_dma_address(ibdev, &sg[i+1]); | ||||
| 			next_addr = ib_sg_dma_address(ibdev, sg_next(sg)); | ||||
| 			/* are i, i+1 fragments of the same page? */ | ||||
| 			if (end_addr == next_addr) | ||||
| 				continue; | ||||
|  | @ -322,15 +328,16 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data, | |||
| static void iser_data_buf_dump(struct iser_data_buf *data, | ||||
| 			       struct ib_device *ibdev) | ||||
| { | ||||
| 	struct scatterlist *sg = (struct scatterlist *)data->buf; | ||||
| 	struct scatterlist *sgl = (struct scatterlist *)data->buf; | ||||
| 	struct scatterlist *sg; | ||||
| 	int i; | ||||
| 
 | ||||
| 	for (i = 0; i < data->dma_nents; i++) | ||||
| 	for_each_sg(sgl, sg, data->dma_nents, i) | ||||
| 		iser_err("sg[%d] dma_addr:0x%lX page:0x%p " | ||||
| 			 "off:0x%x sz:0x%x dma_len:0x%x\n", | ||||
| 			 i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]), | ||||
| 			 sg[i].page, sg[i].offset, | ||||
| 			 sg[i].length, ib_sg_dma_len(ibdev, &sg[i])); | ||||
| 			 i, (unsigned long)ib_sg_dma_address(ibdev, sg), | ||||
| 			 sg->page, sg->offset, | ||||
| 			 sg->length, ib_sg_dma_len(ibdev, sg)); | ||||
| } | ||||
| 
 | ||||
| static void iser_dump_page_vec(struct iser_page_vec *page_vec) | ||||
|  |  | |||
|  | @ -293,7 +293,7 @@ nextSGEset: | |||
| 	for (ii=0; ii < (numSgeThisFrame-1); ii++) { | ||||
| 		thisxfer = sg_dma_len(sg); | ||||
| 		if (thisxfer == 0) { | ||||
| 			sg ++; /* Get next SG element from the OS */ | ||||
| 			sg = sg_next(sg); /* Get next SG element from the OS */ | ||||
| 			sg_done++; | ||||
| 			continue; | ||||
| 		} | ||||
|  | @ -301,7 +301,7 @@ nextSGEset: | |||
| 		v2 = sg_dma_address(sg); | ||||
| 		mptscsih_add_sge(psge, sgflags | thisxfer, v2); | ||||
| 
 | ||||
| 		sg++;		/* Get next SG element from the OS */ | ||||
| 		sg = sg_next(sg);	/* Get next SG element from the OS */ | ||||
| 		psge += (sizeof(u32) + sizeof(dma_addr_t)); | ||||
| 		sgeOffset += (sizeof(u32) + sizeof(dma_addr_t)); | ||||
| 		sg_done++; | ||||
|  | @ -322,7 +322,7 @@ nextSGEset: | |||
| 		v2 = sg_dma_address(sg); | ||||
| 		mptscsih_add_sge(psge, sgflags | thisxfer, v2); | ||||
| 		/*
 | ||||
| 		sg++; | ||||
| 		sg = sg_next(sg); | ||||
| 		psge += (sizeof(u32) + sizeof(dma_addr_t)); | ||||
| 		*/ | ||||
| 		sgeOffset += (sizeof(u32) + sizeof(dma_addr_t)); | ||||
|  |  | |||
|  | @ -153,14 +153,14 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock | |||
| 			blk_queue_max_hw_segments(mq->queue, bouncesz / 512); | ||||
| 			blk_queue_max_segment_size(mq->queue, bouncesz); | ||||
| 
 | ||||
| 			mq->sg = kmalloc(sizeof(struct scatterlist), | ||||
| 			mq->sg = kzalloc(sizeof(struct scatterlist), | ||||
| 				GFP_KERNEL); | ||||
| 			if (!mq->sg) { | ||||
| 				ret = -ENOMEM; | ||||
| 				goto cleanup_queue; | ||||
| 			} | ||||
| 
 | ||||
| 			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) * | ||||
| 			mq->bounce_sg = kzalloc(sizeof(struct scatterlist) * | ||||
| 				bouncesz / 512, GFP_KERNEL); | ||||
| 			if (!mq->bounce_sg) { | ||||
| 				ret = -ENOMEM; | ||||
|  | @ -177,7 +177,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock | |||
| 		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs); | ||||
| 		blk_queue_max_segment_size(mq->queue, host->max_seg_size); | ||||
| 
 | ||||
| 		mq->sg = kmalloc(sizeof(struct scatterlist) * | ||||
| 		mq->sg = kzalloc(sizeof(struct scatterlist) * | ||||
| 			host->max_phys_segs, GFP_KERNEL); | ||||
| 		if (!mq->sg) { | ||||
| 			ret = -ENOMEM; | ||||
|  |  | |||
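The mmc change above is not a straight sg_next() conversion: switching the queue's sg tables from kmalloc() to kzalloc() matters because the chained-list walker decides whether an entry is a chain link by testing the low bit of its page pointer, and freshly zeroed memory can never carry that bit by accident. A minimal userspace sketch of the invariant (illustrative only; sg_entry and looks_like_chain are stand-ins, not kernel symbols):

	#include <stdlib.h>
	#include <assert.h>

	/* Just enough structure to show why the tables are allocated zeroed:
	 * a cleared entry can never be misread as a chain link. */
	struct sg_entry { unsigned long page; unsigned int offset, length; };

	static int looks_like_chain(const struct sg_entry *e)
	{
		return (e->page & 0x01) != 0;	/* low bit set => chain link */
	}

	int main(void)
	{
		struct sg_entry *tbl = calloc(128, sizeof(*tbl)); /* kzalloc analogue */

		assert(tbl && !looks_like_chain(&tbl[127]));	/* zeroed: never a chain */
		free(tbl);
		return 0;
	}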
|  | @ -34,6 +34,7 @@ | |||
| #include <linux/slab.h> | ||||
| #include <linux/mempool.h> | ||||
| #include <linux/syscalls.h> | ||||
| #include <linux/scatterlist.h> | ||||
| #include <linux/ioctl.h> | ||||
| #include <scsi/scsi.h> | ||||
| #include <scsi/scsi_tcq.h> | ||||
|  |  | |||
|  | @ -590,7 +590,7 @@ zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | |||
|  */ | ||||
| int | ||||
| zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | ||||
|                         struct scatterlist *sg,	int sg_count, int max_sbals) | ||||
|                         struct scatterlist *sgl, int sg_count, int max_sbals) | ||||
| { | ||||
| 	int sg_index; | ||||
| 	struct scatterlist *sg_segment; | ||||
|  | @ -606,9 +606,7 @@ zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | |||
| 	sbale->flags |= sbtype; | ||||
| 
 | ||||
| 	/* process all segments of scatter-gather list */ | ||||
| 	for (sg_index = 0, sg_segment = sg, bytes = 0; | ||||
| 	     sg_index < sg_count; | ||||
| 	     sg_index++, sg_segment++) { | ||||
| 	for_each_sg(sgl, sg_segment, sg_count, sg_index) { | ||||
| 		retval = zfcp_qdio_sbals_from_segment( | ||||
| 				fsf_req, | ||||
| 				sbtype, | ||||
|  |  | |||
|  | @ -1990,6 +1990,7 @@ static struct scsi_host_template driver_template = { | |||
| 	.max_sectors		= TW_MAX_SECTORS, | ||||
| 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN, | ||||
| 	.use_clustering		= ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining	= ENABLE_SG_CHAINING, | ||||
| 	.shost_attrs		= twa_host_attrs, | ||||
| 	.emulated		= 1 | ||||
| }; | ||||
|  |  | |||
|  | @ -2261,6 +2261,7 @@ static struct scsi_host_template driver_template = { | |||
| 	.max_sectors		= TW_MAX_SECTORS, | ||||
| 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,	 | ||||
| 	.use_clustering		= ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining	= ENABLE_SG_CHAINING, | ||||
| 	.shost_attrs		= tw_host_attrs, | ||||
| 	.emulated		= 1 | ||||
| }; | ||||
|  |  | |||
|  | @ -3575,6 +3575,7 @@ static struct scsi_host_template Bus_Logic_template = { | |||
| 	.unchecked_isa_dma = 1, | ||||
| 	.max_sectors = 128, | ||||
| 	.use_clustering = ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining = ENABLE_SG_CHAINING, | ||||
| }; | ||||
| 
 | ||||
| /*
 | ||||
|  |  | |||
|  | @ -1066,7 +1066,8 @@ static struct scsi_host_template driver_template = | |||
|      .sg_tablesize      	= 32			/*SG_ALL*/ /*SG_NONE*/,  | ||||
|      .cmd_per_lun       	= 1			/* commands per lun */,  | ||||
|      .unchecked_isa_dma 	= 1			/* unchecked_isa_dma */, | ||||
|      .use_clustering    	= ENABLE_CLUSTERING                                | ||||
|      .use_clustering    	= ENABLE_CLUSTERING, | ||||
|      .use_sg_chaining           = ENABLE_SG_CHAINING, | ||||
| }; | ||||
| 
 | ||||
| #include "scsi_module.c" | ||||
|  |  | |||
|  | @ -1071,6 +1071,7 @@ static struct scsi_host_template inia100_template = { | |||
| 	.sg_tablesize		= SG_ALL, | ||||
| 	.cmd_per_lun 		= 1, | ||||
| 	.use_clustering		= ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining	= ENABLE_SG_CHAINING, | ||||
| }; | ||||
| 
 | ||||
| static int __devinit inia100_probe_one(struct pci_dev *pdev, | ||||
|  |  | |||
|  | @ -944,6 +944,7 @@ static struct scsi_host_template aac_driver_template = { | |||
| 	.cmd_per_lun    		= AAC_NUM_IO_FIB,  | ||||
| #endif	 | ||||
| 	.use_clustering			= ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining		= ENABLE_SG_CHAINING, | ||||
| 	.emulated                       = 1, | ||||
| }; | ||||
| 
 | ||||
|  |  | |||
|  | @ -61,15 +61,15 @@ static void BAD_DMA(void *address, unsigned int length) | |||
| } | ||||
| 
 | ||||
| static void BAD_SG_DMA(Scsi_Cmnd * SCpnt, | ||||
| 		       struct scatterlist *sgpnt, | ||||
| 		       struct scatterlist *sgp, | ||||
| 		       int nseg, | ||||
| 		       int badseg) | ||||
| { | ||||
| 	printk(KERN_CRIT "sgpnt[%d:%d] page %p/0x%llx length %u\n", | ||||
| 	       badseg, nseg, | ||||
| 	       page_address(sgpnt[badseg].page) + sgpnt[badseg].offset, | ||||
| 	       (unsigned long long)SCSI_SG_PA(&sgpnt[badseg]), | ||||
| 	       sgpnt[badseg].length); | ||||
| 	       page_address(sgp->page) + sgp->offset, | ||||
| 	       (unsigned long long)SCSI_SG_PA(sgp), | ||||
| 	       sgp->length); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Not safe to continue. | ||||
|  | @ -691,7 +691,7 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) | |||
| 	memcpy(ccb[mbo].cdb, cmd, ccb[mbo].cdblen); | ||||
| 
 | ||||
| 	if (SCpnt->use_sg) { | ||||
| 		struct scatterlist *sgpnt; | ||||
| 		struct scatterlist *sg; | ||||
| 		struct chain *cptr; | ||||
| #ifdef DEBUG | ||||
| 		unsigned char *ptr; | ||||
|  | @ -699,23 +699,21 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) | |||
| 		int i; | ||||
| 		ccb[mbo].op = 2;	/* SCSI Initiator Command  w/scatter-gather */ | ||||
| 		SCpnt->host_scribble = kmalloc(512, GFP_KERNEL | GFP_DMA); | ||||
| 		sgpnt = (struct scatterlist *) SCpnt->request_buffer; | ||||
| 		cptr = (struct chain *) SCpnt->host_scribble; | ||||
| 		if (cptr == NULL) { | ||||
| 			/* free the claimed mailbox slot */ | ||||
| 			HOSTDATA(SCpnt->device->host)->SCint[mbo] = NULL; | ||||
| 			return SCSI_MLQUEUE_HOST_BUSY; | ||||
| 		} | ||||
| 		for (i = 0; i < SCpnt->use_sg; i++) { | ||||
| 			if (sgpnt[i].length == 0 || SCpnt->use_sg > 16 || | ||||
| 			    (((int) sgpnt[i].offset) & 1) || (sgpnt[i].length & 1)) { | ||||
| 		scsi_for_each_sg(SCpnt, sg, SCpnt->use_sg, i) { | ||||
| 			if (sg->length == 0 || SCpnt->use_sg > 16 || | ||||
| 			    (((int) sg->offset) & 1) || (sg->length & 1)) { | ||||
| 				unsigned char *ptr; | ||||
| 				printk(KERN_CRIT "Bad segment list supplied to aha1542.c (%d, %d)\n", SCpnt->use_sg, i); | ||||
| 				for (i = 0; i < SCpnt->use_sg; i++) { | ||||
| 				scsi_for_each_sg(SCpnt, sg, SCpnt->use_sg, i) { | ||||
| 					printk(KERN_CRIT "%d: %p %d\n", i, | ||||
| 					       (page_address(sgpnt[i].page) + | ||||
| 						sgpnt[i].offset), | ||||
| 					       sgpnt[i].length); | ||||
| 					       (page_address(sg->page) + | ||||
| 						sg->offset), sg->length); | ||||
| 				}; | ||||
| 				printk(KERN_CRIT "cptr %x: ", (unsigned int) cptr); | ||||
| 				ptr = (unsigned char *) &cptr[i]; | ||||
|  | @ -723,10 +721,10 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) | |||
| 					printk("%02x ", ptr[i]); | ||||
| 				panic("Foooooooood fight!"); | ||||
| 			}; | ||||
| 			any2scsi(cptr[i].dataptr, SCSI_SG_PA(&sgpnt[i])); | ||||
| 			if (SCSI_SG_PA(&sgpnt[i]) + sgpnt[i].length - 1 > ISA_DMA_THRESHOLD) | ||||
| 				BAD_SG_DMA(SCpnt, sgpnt, SCpnt->use_sg, i); | ||||
| 			any2scsi(cptr[i].datalen, sgpnt[i].length); | ||||
| 			any2scsi(cptr[i].dataptr, SCSI_SG_PA(sg)); | ||||
| 			if (SCSI_SG_PA(sg) + sg->length - 1 > ISA_DMA_THRESHOLD) | ||||
| 				BAD_SG_DMA(SCpnt, sg, SCpnt->use_sg, i); | ||||
| 			any2scsi(cptr[i].datalen, sg->length); | ||||
| 		}; | ||||
| 		any2scsi(ccb[mbo].datalen, SCpnt->use_sg * sizeof(struct chain)); | ||||
| 		any2scsi(ccb[mbo].dataptr, SCSI_BUF_PA(cptr)); | ||||
|  |  | |||
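aha1542 is the first driver in this section to use scsi_for_each_sg(), which simply anchors for_each_sg() at the command's scatterlist — roughly (paraphrased from <scsi/scsi_cmnd.h> in this series):

	#define scsi_for_each_sg(cmd, sg, nseg, __i) \
		for_each_sg(scsi_sglist(cmd), sg, nseg, __i)

Note that the error path above reuses the same `sg` and `i` for the inner dump loop, so the cursor is clobbered — harmless here, since the path ends in panic().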
|  | @ -563,6 +563,7 @@ static struct scsi_host_template aha1740_template = { | |||
| 	.sg_tablesize     = AHA1740_SCATTER, | ||||
| 	.cmd_per_lun      = AHA1740_CMDLUN, | ||||
| 	.use_clustering   = ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining  = ENABLE_SG_CHAINING, | ||||
| 	.eh_abort_handler = aha1740_eh_abort_handler, | ||||
| }; | ||||
| 
 | ||||
|  |  | |||
|  | @ -766,6 +766,7 @@ struct scsi_host_template aic79xx_driver_template = { | |||
| 	.max_sectors		= 8192, | ||||
| 	.cmd_per_lun		= 2, | ||||
| 	.use_clustering		= ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining	= ENABLE_SG_CHAINING, | ||||
| 	.slave_alloc		= ahd_linux_slave_alloc, | ||||
| 	.slave_configure	= ahd_linux_slave_configure, | ||||
| 	.target_alloc		= ahd_linux_target_alloc, | ||||
|  |  | |||
|  | @ -747,6 +747,7 @@ struct scsi_host_template aic7xxx_driver_template = { | |||
| 	.max_sectors		= 8192, | ||||
| 	.cmd_per_lun		= 2, | ||||
| 	.use_clustering		= ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining	= ENABLE_SG_CHAINING, | ||||
| 	.slave_alloc		= ahc_linux_slave_alloc, | ||||
| 	.slave_configure	= ahc_linux_slave_configure, | ||||
| 	.target_alloc		= ahc_linux_target_alloc, | ||||
|  |  | |||
|  | @ -11142,6 +11142,7 @@ static struct scsi_host_template driver_template = { | |||
| 	.max_sectors		= 2048, | ||||
| 	.cmd_per_lun		= 3, | ||||
| 	.use_clustering		= ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining	= ENABLE_SG_CHAINING, | ||||
| }; | ||||
| 
 | ||||
| #include "scsi_module.c" | ||||
|  |  | |||
|  | @ -94,7 +94,7 @@ static inline int asd_map_scatterlist(struct sas_task *task, | |||
| 			res = -ENOMEM; | ||||
| 			goto err_unmap; | ||||
| 		} | ||||
| 		for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) { | ||||
| 		for_each_sg(task->scatter, sc, num_sg, i) { | ||||
| 			struct sg_el *sg = | ||||
| 				&((struct sg_el *)ascb->sg_arr->vaddr)[i]; | ||||
| 			sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc)); | ||||
|  | @ -103,7 +103,7 @@ static inline int asd_map_scatterlist(struct sas_task *task, | |||
| 				sg->flags |= ASD_SG_EL_LIST_EOL; | ||||
| 		} | ||||
| 
 | ||||
| 		for (sc = task->scatter, i = 0; i < 2; i++, sc++) { | ||||
| 		for_each_sg(task->scatter, sc, 2, i) { | ||||
| 			sg_arr[i].bus_addr = | ||||
| 				cpu_to_le64((u64)sg_dma_address(sc)); | ||||
| 			sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc)); | ||||
|  | @ -115,7 +115,7 @@ static inline int asd_map_scatterlist(struct sas_task *task, | |||
| 		sg_arr[2].bus_addr=cpu_to_le64((u64)ascb->sg_arr->dma_handle); | ||||
| 	} else { | ||||
| 		int i; | ||||
| 		for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) { | ||||
| 		for_each_sg(task->scatter, sc, num_sg, i) { | ||||
| 			sg_arr[i].bus_addr = | ||||
| 				cpu_to_le64((u64)sg_dma_address(sc)); | ||||
| 			sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc)); | ||||
|  |  | |||
|  | @ -122,6 +122,7 @@ static struct scsi_host_template arcmsr_scsi_host_template = { | |||
| 	.max_sectors    	= ARCMSR_MAX_XFER_SECTORS, | ||||
| 	.cmd_per_lun		= ARCMSR_MAX_CMD_PERLUN, | ||||
| 	.use_clustering		= ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining	= ENABLE_SG_CHAINING, | ||||
| 	.shost_attrs		= arcmsr_host_attrs, | ||||
| }; | ||||
| #ifdef CONFIG_SCSI_ARCMSR_AER | ||||
|  |  | |||
|  | @ -4765,6 +4765,7 @@ static struct scsi_host_template dc395x_driver_template = { | |||
| 	.eh_bus_reset_handler   = dc395x_eh_bus_reset, | ||||
| 	.unchecked_isa_dma      = 0, | ||||
| 	.use_clustering         = DISABLE_CLUSTERING, | ||||
| 	.use_sg_chaining	= ENABLE_SG_CHAINING, | ||||
| }; | ||||
| 
 | ||||
| 
 | ||||
|  |  | |||
|  | @ -3295,6 +3295,7 @@ static struct scsi_host_template adpt_template = { | |||
| 	.this_id		= 7, | ||||
| 	.cmd_per_lun		= 1, | ||||
| 	.use_clustering		= ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining	= ENABLE_SG_CHAINING, | ||||
| }; | ||||
| 
 | ||||
| static s32 adpt_scsi_register(adpt_hba* pHba) | ||||
|  |  | |||
|  | @ -523,7 +523,8 @@ static struct scsi_host_template driver_template = { | |||
| 	.slave_configure = eata2x_slave_configure, | ||||
| 	.this_id = 7, | ||||
| 	.unchecked_isa_dma = 1, | ||||
| 	.use_clustering = ENABLE_CLUSTERING | ||||
| 	.use_clustering = ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining = ENABLE_SG_CHAINING, | ||||
| }; | ||||
| 
 | ||||
| #if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD) | ||||
|  |  | |||
|  | @ -343,6 +343,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) | |||
| 	shost->use_clustering = sht->use_clustering; | ||||
| 	shost->ordered_tag = sht->ordered_tag; | ||||
| 	shost->active_mode = sht->supported_mode; | ||||
| 	shost->use_sg_chaining = sht->use_sg_chaining; | ||||
| 
 | ||||
| 	if (sht->max_host_blocked) | ||||
| 		shost->max_host_blocked = sht->max_host_blocked; | ||||
|  |  | |||
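The hosts.c hunk is the plumbing that makes the per-driver `.use_sg_chaining` template entries above and below take effect: scsi_host_alloc() copies the flag into the Scsi_Host, and __scsi_alloc_queue() (in the scsi_lib.c hunks later in this merge) consults it when sizing the queue's segment limits. The supporting declarations are approximately this (a sketch of <scsi/scsi_host.h>; exact placement may differ):

	#define DISABLE_SG_CHAINING	0
	#define ENABLE_SG_CHAINING	1

	struct scsi_host_template {
		...
		/* true if this low-level driver can cope with chained sg lists */
		unsigned use_sg_chaining:1;
		...
	};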
|  | @ -655,6 +655,7 @@ static struct scsi_host_template driver_template = { | |||
| 	.unchecked_isa_dma          = 0, | ||||
| 	.emulated                   = 0, | ||||
| 	.use_clustering             = ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining            = ENABLE_SG_CHAINING, | ||||
| 	.proc_name                  = driver_name, | ||||
| 	.shost_attrs                = hptiop_attrs, | ||||
| 	.this_id                    = -1, | ||||
|  |  | |||
|  | @ -1501,6 +1501,7 @@ static struct scsi_host_template ibmmca_driver_template = { | |||
|           .sg_tablesize   = 16, | ||||
|           .cmd_per_lun    = 1, | ||||
|           .use_clustering = ENABLE_CLUSTERING, | ||||
|           .use_sg_chaining = ENABLE_SG_CHAINING, | ||||
| }; | ||||
| 
 | ||||
| static int ibmmca_probe(struct device *dev) | ||||
|  |  | |||
|  | @ -1548,6 +1548,7 @@ static struct scsi_host_template driver_template = { | |||
| 	.this_id = -1, | ||||
| 	.sg_tablesize = SG_ALL, | ||||
| 	.use_clustering = ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining = ENABLE_SG_CHAINING, | ||||
| 	.shost_attrs = ibmvscsi_attrs, | ||||
| }; | ||||
| 
 | ||||
|  |  | |||
|  | @ -70,6 +70,7 @@ typedef struct idescsi_pc_s { | |||
| 	u8 *buffer;				/* Data buffer */ | ||||
| 	u8 *current_position;			/* Pointer into the above buffer */ | ||||
| 	struct scatterlist *sg;			/* Scatter gather table */ | ||||
| 	struct scatterlist *last_sg;		/* Last sg element */ | ||||
| 	int b_count;				/* Bytes transferred from current entry */ | ||||
| 	struct scsi_cmnd *scsi_cmd;		/* SCSI command */ | ||||
| 	void (*done)(struct scsi_cmnd *);	/* Scsi completion routine */ | ||||
|  | @ -173,12 +174,6 @@ static void idescsi_input_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigne | |||
| 	char *buf; | ||||
| 
 | ||||
| 	while (bcount) { | ||||
| 		if (pc->sg - scsi_sglist(pc->scsi_cmd) > | ||||
| 		                                 scsi_sg_count(pc->scsi_cmd)) { | ||||
| 			printk (KERN_ERR "ide-scsi: scatter gather table too small, discarding data\n"); | ||||
| 			idescsi_discard_data (drive, bcount); | ||||
| 			return; | ||||
| 		} | ||||
| 		count = min(pc->sg->length - pc->b_count, bcount); | ||||
| 		if (PageHighMem(pc->sg->page)) { | ||||
| 			unsigned long flags; | ||||
|  | @ -197,10 +192,17 @@ static void idescsi_input_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigne | |||
| 		} | ||||
| 		bcount -= count; pc->b_count += count; | ||||
| 		if (pc->b_count == pc->sg->length) { | ||||
| 			pc->sg++; | ||||
| 			if (pc->sg == pc->last_sg) | ||||
| 				break; | ||||
| 			pc->sg = sg_next(pc->sg); | ||||
| 			pc->b_count = 0; | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if (bcount) { | ||||
| 		printk (KERN_ERR "ide-scsi: scatter gather table too small, discarding data\n"); | ||||
| 		idescsi_discard_data (drive, bcount); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigned int bcount) | ||||
|  | @ -209,12 +211,6 @@ static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsign | |||
| 	char *buf; | ||||
| 
 | ||||
| 	while (bcount) { | ||||
| 		if (pc->sg - scsi_sglist(pc->scsi_cmd) > | ||||
| 		                                 scsi_sg_count(pc->scsi_cmd)) { | ||||
| 			printk (KERN_ERR "ide-scsi: scatter gather table too small, padding with zeros\n"); | ||||
| 			idescsi_output_zeros (drive, bcount); | ||||
| 			return; | ||||
| 		} | ||||
| 		count = min(pc->sg->length - pc->b_count, bcount); | ||||
| 		if (PageHighMem(pc->sg->page)) { | ||||
| 			unsigned long flags; | ||||
|  | @ -233,10 +229,17 @@ static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsign | |||
| 		} | ||||
| 		bcount -= count; pc->b_count += count; | ||||
| 		if (pc->b_count == pc->sg->length) { | ||||
| 			pc->sg++; | ||||
| 			if (pc->sg == pc->last_sg) | ||||
| 				break; | ||||
| 			pc->sg = sg_next(pc->sg); | ||||
| 			pc->b_count = 0; | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if (bcount) { | ||||
| 		printk (KERN_ERR "ide-scsi: scatter gather table too small, padding with zeros\n"); | ||||
| 		idescsi_output_zeros (drive, bcount); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static void hexdump(u8 *x, int len) | ||||
|  | @ -804,6 +807,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd, | |||
| 	memcpy (pc->c, cmd->cmnd, cmd->cmd_len); | ||||
| 	pc->buffer = NULL; | ||||
| 	pc->sg = scsi_sglist(cmd); | ||||
| 	pc->last_sg = sg_last(pc->sg, cmd->use_sg); | ||||
| 	pc->b_count = 0; | ||||
| 	pc->request_transfer = pc->buffer_size = scsi_bufflen(cmd); | ||||
| 	pc->scsi_cmd = cmd; | ||||
|  |  | |||
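ide-scsi used to detect buffer overrun with pointer arithmetic against scsi_sglist(); that difference is meaningless once the table can hop between separately allocated chunks, so the driver now caches the final element via sg_last() and compares cursors instead. sg_last() at this point in the series is essentially (paraphrased):

	static inline struct scatterlist *sg_last(struct scatterlist *sgl,
						  unsigned int nents)
	{
	#ifndef ARCH_HAS_SG_CHAIN
		struct scatterlist *ret = &sgl[nents - 1];	/* flat table: index */
	#else
		struct scatterlist *sg, *ret = NULL;
		int i;

		for_each_sg(sgl, sg, nents, i)			/* chained: must walk */
			ret = sg;
	#endif
		return ret;
	}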
|  | @ -2831,6 +2831,7 @@ static struct scsi_host_template initio_template = { | |||
| 	.sg_tablesize		= SG_ALL, | ||||
| 	.cmd_per_lun		= 1, | ||||
| 	.use_clustering		= ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining	= ENABLE_SG_CHAINING, | ||||
| }; | ||||
| 
 | ||||
| static int initio_probe_one(struct pci_dev *pdev, | ||||
|  |  | |||
|  | @ -3252,7 +3252,7 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb) | |||
| 		 */ | ||||
| 		if ((scb->breakup) || (scb->sg_break)) { | ||||
|                         struct scatterlist *sg; | ||||
|                         int sg_dma_index, ips_sg_index = 0; | ||||
|                         int i, sg_dma_index, ips_sg_index = 0; | ||||
| 
 | ||||
| 			/* we had a data breakup */ | ||||
| 			scb->data_len = 0; | ||||
|  | @ -3261,20 +3261,22 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb) | |||
| 
 | ||||
|                         sg = scsi_sglist(scb->scsi_cmd); | ||||
| 
 | ||||
|                         /* Spin forward to last dma chunk */ | ||||
|                         sg_dma_index = scb->breakup; | ||||
|                         for (i = 0; i < scb->breakup; i++) | ||||
|                                 sg = sg_next(sg); | ||||
| 
 | ||||
| 			/* Take care of possible partial on last chunk */ | ||||
|                         ips_fill_scb_sg_single(ha, | ||||
|                                                sg_dma_address(&sg[sg_dma_index]), | ||||
|                                                sg_dma_address(sg), | ||||
|                                                scb, ips_sg_index++, | ||||
|                                                sg_dma_len(&sg[sg_dma_index])); | ||||
|                                                sg_dma_len(sg)); | ||||
| 
 | ||||
|                         for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd); | ||||
|                              sg_dma_index++) { | ||||
|                              sg_dma_index++, sg = sg_next(sg)) { | ||||
|                                 if (ips_fill_scb_sg_single | ||||
|                                     (ha, | ||||
|                                      sg_dma_address(&sg[sg_dma_index]), | ||||
|                                      sg_dma_address(sg), | ||||
|                                      scb, ips_sg_index++, | ||||
|                                      sg_dma_len(&sg[sg_dma_index])) < 0) | ||||
|                                      sg_dma_len(sg)) < 0) | ||||
|                                         break; | ||||
|                         } | ||||
| 
 | ||||
|  |  | |||
|  | @ -1438,6 +1438,7 @@ struct scsi_host_template lpfc_template = { | |||
| 	.scan_finished		= lpfc_scan_finished, | ||||
| 	.this_id		= -1, | ||||
| 	.sg_tablesize		= LPFC_SG_SEG_CNT, | ||||
| 	.use_sg_chaining	= ENABLE_SG_CHAINING, | ||||
| 	.cmd_per_lun		= LPFC_CMD_PER_LUN, | ||||
| 	.use_clustering		= ENABLE_CLUSTERING, | ||||
| 	.shost_attrs		= lpfc_hba_attrs, | ||||
|  | @ -1460,6 +1461,7 @@ struct scsi_host_template lpfc_vport_template = { | |||
| 	.sg_tablesize		= LPFC_SG_SEG_CNT, | ||||
| 	.cmd_per_lun		= LPFC_CMD_PER_LUN, | ||||
| 	.use_clustering		= ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining	= ENABLE_SG_CHAINING, | ||||
| 	.shost_attrs		= lpfc_vport_attrs, | ||||
| 	.max_sectors		= 0xFFFF, | ||||
| }; | ||||
|  |  | |||
|  | @ -402,6 +402,7 @@ static struct scsi_host_template mac53c94_template = { | |||
| 	.sg_tablesize	= SG_ALL, | ||||
| 	.cmd_per_lun	= 1, | ||||
| 	.use_clustering	= DISABLE_CLUSTERING, | ||||
| 	.use_sg_chaining = ENABLE_SG_CHAINING, | ||||
| }; | ||||
| 
 | ||||
| static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match) | ||||
|  |  | |||
|  | @ -4492,6 +4492,7 @@ static struct scsi_host_template megaraid_template = { | |||
| 	.sg_tablesize			= MAX_SGLIST, | ||||
| 	.cmd_per_lun			= DEF_CMD_PER_LUN, | ||||
| 	.use_clustering			= ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining		= ENABLE_SG_CHAINING, | ||||
| 	.eh_abort_handler		= megaraid_abort, | ||||
| 	.eh_device_reset_handler	= megaraid_reset, | ||||
| 	.eh_bus_reset_handler		= megaraid_reset, | ||||
|  |  | |||
|  | @ -361,6 +361,7 @@ static struct scsi_host_template megaraid_template_g = { | |||
| 	.eh_host_reset_handler		= megaraid_reset_handler, | ||||
| 	.change_queue_depth		= megaraid_change_queue_depth, | ||||
| 	.use_clustering			= ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining		= ENABLE_SG_CHAINING, | ||||
| 	.sdev_attrs			= megaraid_sdev_attrs, | ||||
| 	.shost_attrs			= megaraid_shost_attrs, | ||||
| }; | ||||
|  |  | |||
|  | @ -1110,6 +1110,7 @@ static struct scsi_host_template megasas_template = { | |||
| 	.eh_timed_out = megasas_reset_timer, | ||||
| 	.bios_param = megasas_bios_param, | ||||
| 	.use_clustering = ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining = ENABLE_SG_CHAINING, | ||||
| }; | ||||
| 
 | ||||
| /**
 | ||||
|  |  | |||
|  | @ -1843,6 +1843,7 @@ static struct scsi_host_template mesh_template = { | |||
| 	.sg_tablesize			= SG_ALL, | ||||
| 	.cmd_per_lun			= 2, | ||||
| 	.use_clustering			= DISABLE_CLUSTERING, | ||||
| 	.use_sg_chaining		= ENABLE_SG_CHAINING, | ||||
| }; | ||||
| 
 | ||||
| static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match) | ||||
|  |  | |||
|  | @ -281,6 +281,7 @@ static struct scsi_host_template nsp32_template = { | |||
| 	.cmd_per_lun			= 1, | ||||
| 	.this_id			= NSP32_HOST_SCSIID, | ||||
| 	.use_clustering			= DISABLE_CLUSTERING, | ||||
| 	.use_sg_chaining		= ENABLE_SG_CHAINING, | ||||
| 	.eh_abort_handler       	= nsp32_eh_abort, | ||||
| 	.eh_bus_reset_handler		= nsp32_eh_bus_reset, | ||||
| 	.eh_host_reset_handler		= nsp32_eh_host_reset, | ||||
|  |  | |||
|  | @ -694,6 +694,7 @@ static struct scsi_host_template sym53c500_driver_template = { | |||
|      .sg_tablesize		= 32, | ||||
|      .cmd_per_lun		= 1, | ||||
|      .use_clustering		= ENABLE_CLUSTERING, | ||||
|      .use_sg_chaining		= ENABLE_SG_CHAINING, | ||||
|      .shost_attrs		= SYM53C500_shost_attrs | ||||
| }; | ||||
| 
 | ||||
|  |  | |||
|  | @ -2775,7 +2775,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) | |||
| 	struct device_reg __iomem *reg = ha->iobase; | ||||
| 	struct scsi_cmnd *cmd = sp->cmd; | ||||
| 	cmd_a64_entry_t *pkt; | ||||
| 	struct scatterlist *sg = NULL; | ||||
| 	struct scatterlist *sg = NULL, *s; | ||||
| 	__le32 *dword_ptr; | ||||
| 	dma_addr_t dma_handle; | ||||
| 	int status = 0; | ||||
|  | @ -2889,13 +2889,16 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) | |||
| 	 * Load data segments. | ||||
| 	 */ | ||||
| 	if (seg_cnt) {	/* If data transfer. */ | ||||
| 		int remseg = seg_cnt; | ||||
| 		/* Setup packet address segment pointer. */ | ||||
| 		dword_ptr = (u32 *)&pkt->dseg_0_address; | ||||
| 
 | ||||
| 		if (cmd->use_sg) {	/* If scatter gather */ | ||||
| 			/* Load command entry data segments. */ | ||||
| 			for (cnt = 0; cnt < 2 && seg_cnt; cnt++, seg_cnt--) { | ||||
| 				dma_handle = sg_dma_address(sg); | ||||
| 			for_each_sg(sg, s, seg_cnt, cnt) { | ||||
| 				if (cnt == 2) | ||||
| 					break; | ||||
| 				dma_handle = sg_dma_address(s); | ||||
| #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) | ||||
| 				if (ha->flags.use_pci_vchannel) | ||||
| 					sn_pci_set_vchan(ha->pdev, | ||||
|  | @ -2906,12 +2909,12 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) | |||
| 					cpu_to_le32(pci_dma_lo32(dma_handle)); | ||||
| 				*dword_ptr++ = | ||||
| 					cpu_to_le32(pci_dma_hi32(dma_handle)); | ||||
| 				*dword_ptr++ = cpu_to_le32(sg_dma_len(sg)); | ||||
| 				sg++; | ||||
| 				*dword_ptr++ = cpu_to_le32(sg_dma_len(s)); | ||||
| 				dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n", | ||||
| 					cpu_to_le32(pci_dma_hi32(dma_handle)), | ||||
| 					cpu_to_le32(pci_dma_lo32(dma_handle)), | ||||
| 					cpu_to_le32(sg_dma_len(sg))); | ||||
| 					cpu_to_le32(sg_dma_len(s))); | ||||
| 				remseg--; | ||||
| 			} | ||||
| 			dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather " | ||||
| 				"command packet data - b %i, t %i, l %i \n", | ||||
|  | @ -2926,7 +2929,9 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) | |||
| 			dprintk(3, "S/G Building Continuation...seg_cnt=0x%x " | ||||
| 				"remains\n", seg_cnt); | ||||
| 
 | ||||
| 			while (seg_cnt > 0) { | ||||
| 			while (remseg > 0) { | ||||
| 				/* Update sg start */ | ||||
| 				sg = s; | ||||
| 				/* Adjust ring index. */ | ||||
| 				ha->req_ring_index++; | ||||
| 				if (ha->req_ring_index == REQUEST_ENTRY_CNT) { | ||||
|  | @ -2952,9 +2957,10 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) | |||
| 					(u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address; | ||||
| 
 | ||||
| 				/* Load continuation entry data segments. */ | ||||
| 				for (cnt = 0; cnt < 5 && seg_cnt; | ||||
| 				     cnt++, seg_cnt--) { | ||||
| 					dma_handle = sg_dma_address(sg); | ||||
| 				for_each_sg(sg, s, remseg, cnt) { | ||||
| 					if (cnt == 5) | ||||
| 						break; | ||||
| 					dma_handle = sg_dma_address(s); | ||||
| #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) | ||||
| 				if (ha->flags.use_pci_vchannel) | ||||
| 					sn_pci_set_vchan(ha->pdev,  | ||||
|  | @ -2966,13 +2972,13 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) | |||
| 					*dword_ptr++ = | ||||
| 						cpu_to_le32(pci_dma_hi32(dma_handle)); | ||||
| 					*dword_ptr++ = | ||||
| 						cpu_to_le32(sg_dma_len(sg)); | ||||
| 						cpu_to_le32(sg_dma_len(s)); | ||||
| 					dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n", | ||||
| 						cpu_to_le32(pci_dma_hi32(dma_handle)), | ||||
| 						cpu_to_le32(pci_dma_lo32(dma_handle)), | ||||
| 						cpu_to_le32(sg_dma_len(sg))); | ||||
| 					sg++; | ||||
| 						cpu_to_le32(sg_dma_len(s))); | ||||
| 				} | ||||
| 				remseg -= cnt; | ||||
| 				dprintk(5, "qla1280_64bit_start_scsi: " | ||||
| 					"continuation packet data - b %i, t " | ||||
| 					"%i, l %i \n", SCSI_BUS_32(cmd), | ||||
|  | @ -3062,7 +3068,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) | |||
| 	struct device_reg __iomem *reg = ha->iobase; | ||||
| 	struct scsi_cmnd *cmd = sp->cmd; | ||||
| 	struct cmd_entry *pkt; | ||||
| 	struct scatterlist *sg = NULL; | ||||
| 	struct scatterlist *sg = NULL, *s; | ||||
| 	__le32 *dword_ptr; | ||||
| 	int status = 0; | ||||
| 	int cnt; | ||||
|  | @ -3188,6 +3194,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) | |||
| 	 * Load data segments. | ||||
| 	 */ | ||||
| 	if (seg_cnt) { | ||||
| 		int remseg = seg_cnt; | ||||
| 		/* Setup packet address segment pointer. */ | ||||
| 		dword_ptr = &pkt->dseg_0_address; | ||||
| 
 | ||||
|  | @ -3196,22 +3203,25 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) | |||
| 			qla1280_dump_buffer(1, (char *)sg, 4 * 16); | ||||
| 
 | ||||
| 			/* Load command entry data segments. */ | ||||
| 			for (cnt = 0; cnt < 4 && seg_cnt; cnt++, seg_cnt--) { | ||||
| 			for_each_sg(sg, s, seg_cnt, cnt) { | ||||
| 				if (cnt == 4) | ||||
| 					break; | ||||
| 				*dword_ptr++ = | ||||
| 					cpu_to_le32(pci_dma_lo32(sg_dma_address(sg))); | ||||
| 				*dword_ptr++ = | ||||
| 					cpu_to_le32(sg_dma_len(sg)); | ||||
| 					cpu_to_le32(pci_dma_lo32(sg_dma_address(s))); | ||||
| 				*dword_ptr++ = cpu_to_le32(sg_dma_len(s)); | ||||
| 				dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n", | ||||
| 					(pci_dma_lo32(sg_dma_address(sg))), | ||||
| 					(sg_dma_len(sg))); | ||||
| 				sg++; | ||||
| 					(pci_dma_lo32(sg_dma_address(s))), | ||||
| 					(sg_dma_len(s))); | ||||
| 				remseg--; | ||||
| 			} | ||||
| 			/*
 | ||||
| 			 * Build continuation packets. | ||||
| 			 */ | ||||
| 			dprintk(3, "S/G Building Continuation" | ||||
| 				"...seg_cnt=0x%x remains\n", seg_cnt); | ||||
| 			while (seg_cnt > 0) { | ||||
| 			while (remseg > 0) { | ||||
| 				/* Continue from end point */ | ||||
| 				sg = s; | ||||
| 				/* Adjust ring index. */ | ||||
| 				ha->req_ring_index++; | ||||
| 				if (ha->req_ring_index == REQUEST_ENTRY_CNT) { | ||||
|  | @ -3239,19 +3249,20 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) | |||
| 					&((struct cont_entry *) pkt)->dseg_0_address; | ||||
| 
 | ||||
| 				/* Load continuation entry data segments. */ | ||||
| 				for (cnt = 0; cnt < 7 && seg_cnt; | ||||
| 				     cnt++, seg_cnt--) { | ||||
| 				for_each_sg(sg, s, remseg, cnt) { | ||||
| 					if (cnt == 7) | ||||
| 						break; | ||||
| 					*dword_ptr++ = | ||||
| 						cpu_to_le32(pci_dma_lo32(sg_dma_address(sg))); | ||||
| 						cpu_to_le32(pci_dma_lo32(sg_dma_address(s))); | ||||
| 					*dword_ptr++ = | ||||
| 						cpu_to_le32(sg_dma_len(sg)); | ||||
| 						cpu_to_le32(sg_dma_len(s)); | ||||
| 					dprintk(1, | ||||
| 						"S/G Segment Cont. phys_addr=0x%x, " | ||||
| 						"len=0x%x\n", | ||||
| 						cpu_to_le32(pci_dma_lo32(sg_dma_address(sg))), | ||||
| 						cpu_to_le32(sg_dma_len(sg))); | ||||
| 					sg++; | ||||
| 						cpu_to_le32(pci_dma_lo32(sg_dma_address(s))), | ||||
| 						cpu_to_le32(sg_dma_len(s))); | ||||
| 				} | ||||
| 				remseg -= cnt; | ||||
| 				dprintk(5, "qla1280_32bit_start_scsi: " | ||||
| 					"continuation packet data - " | ||||
| 					"scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd), | ||||
|  | @ -4248,6 +4259,7 @@ static struct scsi_host_template qla1280_driver_template = { | |||
| 	.sg_tablesize		= SG_ALL, | ||||
| 	.cmd_per_lun		= 1, | ||||
| 	.use_clustering		= ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining	= ENABLE_SG_CHAINING, | ||||
| }; | ||||
| 
 | ||||
| 
 | ||||
|  |  | |||
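qla1280 (like qlogicpti below) must split one long scatterlist across fixed-size descriptor blocks, so it bounds each for_each_sg() pass with an explicit break and resumes from the saved cursor. Stripped of the hardware details, the idiom looks like this (a sketch; CHUNK and fill_descriptor() are illustrative placeholders, not driver symbols):

	struct scatterlist *sg = sgl, *s;
	int remseg = seg_cnt, cnt;

	while (remseg > 0) {
		for_each_sg(sg, s, remseg, cnt) {
			if (cnt == CHUNK)	/* this descriptor block is full */
				break;
			fill_descriptor(s);	/* sg_dma_address(s) / sg_dma_len(s) */
		}
		remseg -= cnt;	/* cnt entries were consumed this pass */
		sg = s;		/* on break, s is the first unconsumed entry */
	}

When the inner loop breaks, `s` already points at the first unfilled entry, so restarting the outer pass from it is safe; when the list runs out naturally, `remseg` drops to zero and the stale `sg = s` cursor is never dereferenced.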
|  | @ -132,6 +132,7 @@ struct scsi_host_template qla2x00_driver_template = { | |||
| 	.this_id		= -1, | ||||
| 	.cmd_per_lun		= 3, | ||||
| 	.use_clustering		= ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining	= ENABLE_SG_CHAINING, | ||||
| 	.sg_tablesize		= SG_ALL, | ||||
| 
 | ||||
| 	/*
 | ||||
|  | @ -163,6 +164,7 @@ struct scsi_host_template qla24xx_driver_template = { | |||
| 	.this_id		= -1, | ||||
| 	.cmd_per_lun		= 3, | ||||
| 	.use_clustering		= ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining	= ENABLE_SG_CHAINING, | ||||
| 	.sg_tablesize		= SG_ALL, | ||||
| 
 | ||||
| 	.max_sectors		= 0xFFFF, | ||||
|  |  | |||
|  | @ -94,6 +94,7 @@ static struct scsi_host_template qla4xxx_driver_template = { | |||
| 	.this_id		= -1, | ||||
| 	.cmd_per_lun		= 3, | ||||
| 	.use_clustering		= ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining	= ENABLE_SG_CHAINING, | ||||
| 	.sg_tablesize		= SG_ALL, | ||||
| 
 | ||||
| 	.max_sectors		= 0xFFFF, | ||||
|  |  | |||
|  | @ -197,6 +197,7 @@ static struct scsi_host_template qlogicfas_driver_template = { | |||
| 	.sg_tablesize		= SG_ALL, | ||||
| 	.cmd_per_lun		= 1, | ||||
| 	.use_clustering		= DISABLE_CLUSTERING, | ||||
| 	.use_sg_chaining	= ENABLE_SG_CHAINING, | ||||
| }; | ||||
| 
 | ||||
| static __init int qlogicfas_init(void) | ||||
|  |  | |||
|  | @ -868,7 +868,7 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd, | |||
| 			   struct qlogicpti *qpti, u_int in_ptr, u_int out_ptr) | ||||
| { | ||||
| 	struct dataseg *ds; | ||||
| 	struct scatterlist *sg; | ||||
| 	struct scatterlist *sg, *s; | ||||
| 	int i, n; | ||||
| 
 | ||||
| 	if (Cmnd->use_sg) { | ||||
|  | @ -884,11 +884,12 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd, | |||
| 		n = sg_count; | ||||
| 		if (n > 4) | ||||
| 			n = 4; | ||||
| 		for (i = 0; i < n; i++, sg++) { | ||||
| 			ds[i].d_base = sg_dma_address(sg); | ||||
| 			ds[i].d_count = sg_dma_len(sg); | ||||
| 		for_each_sg(sg, s, n, i) { | ||||
| 			ds[i].d_base = sg_dma_address(s); | ||||
| 			ds[i].d_count = sg_dma_len(s); | ||||
| 		} | ||||
| 		sg_count -= 4; | ||||
| 		sg = s; | ||||
| 		while (sg_count > 0) { | ||||
| 			struct Continuation_Entry *cont; | ||||
| 
 | ||||
|  | @ -907,9 +908,9 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd, | |||
| 			n = sg_count; | ||||
| 			if (n > 7) | ||||
| 				n = 7; | ||||
| 			for (i = 0; i < n; i++, sg++) { | ||||
| 				ds[i].d_base = sg_dma_address(sg); | ||||
| 				ds[i].d_count = sg_dma_len(sg); | ||||
| 			for_each_sg(sg, s, n, i) { | ||||
| 				ds[i].d_base = sg_dma_address(s); | ||||
| 				ds[i].d_count = sg_dma_len(s); | ||||
| 			} | ||||
| 			sg_count -= n; | ||||
| 		} | ||||
|  |  | |||
|  | @ -38,6 +38,7 @@ | |||
| #include <linux/proc_fs.h> | ||||
| #include <linux/vmalloc.h> | ||||
| #include <linux/moduleparam.h> | ||||
| #include <linux/scatterlist.h> | ||||
| 
 | ||||
| #include <linux/blkdev.h> | ||||
| #include "scsi.h" | ||||
|  | @ -600,7 +601,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr, | |||
| 	int k, req_len, act_len, len, active; | ||||
| 	void * kaddr; | ||||
| 	void * kaddr_off; | ||||
| 	struct scatterlist * sgpnt; | ||||
| 	struct scatterlist * sg; | ||||
| 
 | ||||
| 	if (0 == scp->request_bufflen) | ||||
| 		return 0; | ||||
|  | @ -619,16 +620,16 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr, | |||
| 			scp->resid = req_len - act_len; | ||||
| 		return 0; | ||||
| 	} | ||||
| 	sgpnt = (struct scatterlist *)scp->request_buffer; | ||||
| 	active = 1; | ||||
| 	for (k = 0, req_len = 0, act_len = 0; k < scp->use_sg; ++k, ++sgpnt) { | ||||
| 	req_len = act_len = 0; | ||||
| 	scsi_for_each_sg(scp, sg, scp->use_sg, k) { | ||||
| 		if (active) { | ||||
| 			kaddr = (unsigned char *) | ||||
| 				kmap_atomic(sgpnt->page, KM_USER0); | ||||
| 				kmap_atomic(sg->page, KM_USER0); | ||||
| 			if (NULL == kaddr) | ||||
| 				return (DID_ERROR << 16); | ||||
| 			kaddr_off = (unsigned char *)kaddr + sgpnt->offset; | ||||
| 			len = sgpnt->length; | ||||
| 			kaddr_off = (unsigned char *)kaddr + sg->offset; | ||||
| 			len = sg->length; | ||||
| 			if ((req_len + len) > arr_len) { | ||||
| 				active = 0; | ||||
| 				len = arr_len - req_len; | ||||
|  | @ -637,7 +638,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr, | |||
| 			kunmap_atomic(kaddr, KM_USER0); | ||||
| 			act_len += len; | ||||
| 		} | ||||
| 		req_len += sgpnt->length; | ||||
| 		req_len += sg->length; | ||||
| 	} | ||||
| 	if (scp->resid) | ||||
| 		scp->resid -= act_len; | ||||
|  | @ -653,7 +654,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr, | |||
| 	int k, req_len, len, fin; | ||||
| 	void * kaddr; | ||||
| 	void * kaddr_off; | ||||
| 	struct scatterlist * sgpnt; | ||||
| 	struct scatterlist * sg; | ||||
| 
 | ||||
| 	if (0 == scp->request_bufflen) | ||||
| 		return 0; | ||||
|  | @ -668,13 +669,14 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr, | |||
| 		memcpy(arr, scp->request_buffer, len); | ||||
| 		return len; | ||||
| 	} | ||||
| 	sgpnt = (struct scatterlist *)scp->request_buffer; | ||||
| 	for (k = 0, req_len = 0, fin = 0; k < scp->use_sg; ++k, ++sgpnt) { | ||||
| 		kaddr = (unsigned char *)kmap_atomic(sgpnt->page, KM_USER0); | ||||
| 	sg = scsi_sglist(scp); | ||||
| 	req_len = fin = 0; | ||||
| 	for (k = 0; k < scp->use_sg; ++k, sg = sg_next(sg)) { | ||||
| 		kaddr = (unsigned char *)kmap_atomic(sg->page, KM_USER0); | ||||
| 		if (NULL == kaddr) | ||||
| 			return -1; | ||||
| 		kaddr_off = (unsigned char *)kaddr + sgpnt->offset; | ||||
| 		len = sgpnt->length; | ||||
| 		kaddr_off = (unsigned char *)kaddr + sg->offset; | ||||
| 		len = sg->length; | ||||
| 		if ((req_len + len) > max_arr_len) { | ||||
| 			len = max_arr_len - req_len; | ||||
| 			fin = 1; | ||||
|  | @ -683,7 +685,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr, | |||
| 		kunmap_atomic(kaddr, KM_USER0); | ||||
| 		if (fin) | ||||
| 			return req_len + len; | ||||
| 		req_len += sgpnt->length; | ||||
| 		req_len += sg->length; | ||||
| 	} | ||||
| 	return req_len; | ||||
| } | ||||
|  |  | |||
|  | @ -17,6 +17,7 @@ | |||
| #include <linux/pci.h> | ||||
| #include <linux/delay.h> | ||||
| #include <linux/hardirq.h> | ||||
| #include <linux/scatterlist.h> | ||||
| 
 | ||||
| #include <scsi/scsi.h> | ||||
| #include <scsi/scsi_cmnd.h> | ||||
|  | @ -33,35 +34,34 @@ | |||
| #define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools) | ||||
| #define SG_MEMPOOL_SIZE		2 | ||||
| 
 | ||||
| /*
 | ||||
|  * The maximum number of SG segments that we will put inside a scatterlist | ||||
|  * (unless chaining is used). Should ideally fit inside a single page, to | ||||
|  * avoid a higher order allocation. | ||||
|  */ | ||||
| #define SCSI_MAX_SG_SEGMENTS	128 | ||||
| 
 | ||||
| struct scsi_host_sg_pool { | ||||
| 	size_t		size; | ||||
| 	char		*name;  | ||||
| 	char		*name; | ||||
| 	struct kmem_cache	*slab; | ||||
| 	mempool_t	*pool; | ||||
| }; | ||||
| 
 | ||||
| #if (SCSI_MAX_PHYS_SEGMENTS < 32) | ||||
| #error SCSI_MAX_PHYS_SEGMENTS is too small | ||||
| #endif | ||||
| 
 | ||||
| #define SP(x) { x, "sgpool-" #x }  | ||||
| #define SP(x) { x, "sgpool-" #x } | ||||
| static struct scsi_host_sg_pool scsi_sg_pools[] = { | ||||
| 	SP(8), | ||||
| 	SP(16), | ||||
| #if (SCSI_MAX_SG_SEGMENTS > 16) | ||||
| 	SP(32), | ||||
| #if (SCSI_MAX_PHYS_SEGMENTS > 32) | ||||
| #if (SCSI_MAX_SG_SEGMENTS > 32) | ||||
| 	SP(64), | ||||
| #if (SCSI_MAX_PHYS_SEGMENTS > 64) | ||||
| #if (SCSI_MAX_SG_SEGMENTS > 64) | ||||
| 	SP(128), | ||||
| #if (SCSI_MAX_PHYS_SEGMENTS > 128) | ||||
| 	SP(256), | ||||
| #if (SCSI_MAX_PHYS_SEGMENTS > 256) | ||||
| #error SCSI_MAX_PHYS_SEGMENTS is too large | ||||
| #endif | ||||
| #endif | ||||
| #endif | ||||
| #endif | ||||
| }; 	 | ||||
| }; | ||||
| #undef SP | ||||
| 
 | ||||
| static void scsi_run_queue(struct request_queue *q); | ||||
|  | @ -289,14 +289,16 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl, | |||
| 	struct request_queue *q = rq->q; | ||||
| 	int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||||
| 	unsigned int data_len = bufflen, len, bytes, off; | ||||
| 	struct scatterlist *sg; | ||||
| 	struct page *page; | ||||
| 	struct bio *bio = NULL; | ||||
| 	int i, err, nr_vecs = 0; | ||||
| 
 | ||||
| 	for (i = 0; i < nsegs; i++) { | ||||
| 		page = sgl[i].page; | ||||
| 		off = sgl[i].offset; | ||||
| 		len = sgl[i].length; | ||||
| 	for_each_sg(sgl, sg, nsegs, i) { | ||||
| 		page = sg->page; | ||||
| 		off = sg->offset; | ||||
| 		len = sg->length; | ||||
| 		data_len += len; | ||||
| 
 | ||||
| 		while (len > 0 && data_len > 0) { | ||||
| 			/*
 | ||||
|  | @ -695,56 +697,170 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate, | |||
| 	return NULL; | ||||
| } | ||||
| 
 | ||||
| struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask) | ||||
| /*
 | ||||
|  * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit | ||||
|  * is totally arbitrary, a setting of 2048 will get you at least 8mb ios. | ||||
|  */ | ||||
| #define SCSI_MAX_SG_CHAIN_SEGMENTS	2048 | ||||
| 
 | ||||
| static inline unsigned int scsi_sgtable_index(unsigned short nents) | ||||
| { | ||||
| 	struct scsi_host_sg_pool *sgp; | ||||
| 	struct scatterlist *sgl; | ||||
| 	unsigned int index; | ||||
| 
 | ||||
| 	BUG_ON(!cmd->use_sg); | ||||
| 
 | ||||
| 	switch (cmd->use_sg) { | ||||
| 	switch (nents) { | ||||
| 	case 1 ... 8: | ||||
| 		cmd->sglist_len = 0; | ||||
| 		index = 0; | ||||
| 		break; | ||||
| 	case 9 ... 16: | ||||
| 		cmd->sglist_len = 1; | ||||
| 		index = 1; | ||||
| 		break; | ||||
| #if (SCSI_MAX_SG_SEGMENTS > 16) | ||||
| 	case 17 ... 32: | ||||
| 		cmd->sglist_len = 2; | ||||
| 		index = 2; | ||||
| 		break; | ||||
| #if (SCSI_MAX_PHYS_SEGMENTS > 32) | ||||
| #if (SCSI_MAX_SG_SEGMENTS > 32) | ||||
| 	case 33 ... 64: | ||||
| 		cmd->sglist_len = 3; | ||||
| 		index = 3; | ||||
| 		break; | ||||
| #if (SCSI_MAX_PHYS_SEGMENTS > 64) | ||||
| #if (SCSI_MAX_SG_SEGMENTS > 64) | ||||
| 	case 65 ... 128: | ||||
| 		cmd->sglist_len = 4; | ||||
| 		break; | ||||
| #if (SCSI_MAX_PHYS_SEGMENTS  > 128) | ||||
| 	case 129 ... 256: | ||||
| 		cmd->sglist_len = 5; | ||||
| 		index = 4; | ||||
| 		break; | ||||
| #endif | ||||
| #endif | ||||
| #endif | ||||
| 	default: | ||||
| 		return NULL; | ||||
| 		printk(KERN_ERR "scsi: bad segment count=%d\n", nents); | ||||
| 		BUG(); | ||||
| 	} | ||||
| 
 | ||||
| 	sgp = scsi_sg_pools + cmd->sglist_len; | ||||
| 	sgl = mempool_alloc(sgp->pool, gfp_mask); | ||||
| 	return sgl; | ||||
| 	return index; | ||||
| } | ||||
| 
 | ||||
| struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask) | ||||
| { | ||||
| 	struct scsi_host_sg_pool *sgp; | ||||
| 	struct scatterlist *sgl, *prev, *ret; | ||||
| 	unsigned int index; | ||||
| 	int this, left; | ||||
| 
 | ||||
| 	BUG_ON(!cmd->use_sg); | ||||
| 
 | ||||
| 	left = cmd->use_sg; | ||||
| 	ret = prev = NULL; | ||||
| 	do { | ||||
| 		this = left; | ||||
| 		if (this > SCSI_MAX_SG_SEGMENTS) { | ||||
| 			this = SCSI_MAX_SG_SEGMENTS - 1; | ||||
| 			index = SG_MEMPOOL_NR - 1; | ||||
| 		} else | ||||
| 			index = scsi_sgtable_index(this); | ||||
| 
 | ||||
| 		left -= this; | ||||
| 
 | ||||
| 		sgp = scsi_sg_pools + index; | ||||
| 
 | ||||
| 		sgl = mempool_alloc(sgp->pool, gfp_mask); | ||||
| 		if (unlikely(!sgl)) | ||||
| 			goto enomem; | ||||
| 
 | ||||
| 		memset(sgl, 0, sizeof(*sgl) * sgp->size); | ||||
| 
 | ||||
| 		/*
 | ||||
| 		 * first loop through, set initial index and return value | ||||
| 		 */ | ||||
| 		if (!ret) | ||||
| 			ret = sgl; | ||||
| 
 | ||||
| 		/*
 | ||||
| 		 * chain previous sglist, if any. we know the previous | ||||
| 		 * sglist must be the biggest one, or we would not have | ||||
| 		 * ended up doing another loop. | ||||
| 		 */ | ||||
| 		if (prev) | ||||
| 			sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl); | ||||
| 
 | ||||
| 		/*
 | ||||
| 		 * don't allow subsequent mempool allocs to sleep, it would | ||||
| 		 * violate the mempool principle. | ||||
| 		 */ | ||||
| 		gfp_mask &= ~__GFP_WAIT; | ||||
| 		gfp_mask |= __GFP_HIGH; | ||||
| 		prev = sgl; | ||||
| 	} while (left); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * ->use_sg may get modified after dma mapping has potentially | ||||
| 	 * shrunk the number of segments, so keep a copy of it for the free path. | ||||
| 	 */ | ||||
| 	cmd->__use_sg = cmd->use_sg; | ||||
| 	return ret; | ||||
| enomem: | ||||
| 	if (ret) { | ||||
| 		/*
 | ||||
| 		 * Free entries chained off ret. Since we were trying to | ||||
| 		 * allocate another sglist, we know that all entries are of | ||||
| 		 * the max size. | ||||
| 		 */ | ||||
| 		sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1; | ||||
| 		prev = ret; | ||||
| 		ret = &ret[SCSI_MAX_SG_SEGMENTS - 1]; | ||||
| 
 | ||||
| 		while ((sgl = sg_chain_ptr(ret)) != NULL) { | ||||
| 			ret = &sgl[SCSI_MAX_SG_SEGMENTS - 1]; | ||||
| 			mempool_free(sgl, sgp->pool); | ||||
| 		} | ||||
| 
 | ||||
| 		mempool_free(prev, sgp->pool); | ||||
| 	} | ||||
| 	return NULL; | ||||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(scsi_alloc_sgtable); | ||||
| 
 | ||||
| void scsi_free_sgtable(struct scatterlist *sgl, int index) | ||||
| void scsi_free_sgtable(struct scsi_cmnd *cmd) | ||||
| { | ||||
| 	struct scatterlist *sgl = cmd->request_buffer; | ||||
| 	struct scsi_host_sg_pool *sgp; | ||||
| 
 | ||||
| 	BUG_ON(index >= SG_MEMPOOL_NR); | ||||
| 	/*
 | ||||
| 	 * if this is the biggest size sglist, check if we have | ||||
| 	 * chained parts we need to free | ||||
| 	 */ | ||||
| 	if (cmd->__use_sg > SCSI_MAX_SG_SEGMENTS) { | ||||
| 		unsigned short this, left; | ||||
| 		struct scatterlist *next; | ||||
| 		unsigned int index; | ||||
| 
 | ||||
| 		left = cmd->__use_sg - (SCSI_MAX_SG_SEGMENTS - 1); | ||||
| 		next = sg_chain_ptr(&sgl[SCSI_MAX_SG_SEGMENTS - 1]); | ||||
| 		while (left && next) { | ||||
| 			sgl = next; | ||||
| 			this = left; | ||||
| 			if (this > SCSI_MAX_SG_SEGMENTS) { | ||||
| 				this = SCSI_MAX_SG_SEGMENTS - 1; | ||||
| 				index = SG_MEMPOOL_NR - 1; | ||||
| 			} else | ||||
| 				index = scsi_sgtable_index(this); | ||||
| 
 | ||||
| 			left -= this; | ||||
| 
 | ||||
| 			sgp = scsi_sg_pools + index; | ||||
| 
 | ||||
| 			if (left) | ||||
| 				next = sg_chain_ptr(&sgl[sgp->size - 1]); | ||||
| 
 | ||||
| 			mempool_free(sgl, sgp->pool); | ||||
| 		} | ||||
| 
 | ||||
| 		/*
 | ||||
| 		 * Restore original, will be freed below | ||||
| 		 */ | ||||
| 		sgl = cmd->request_buffer; | ||||
| 		sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1; | ||||
| 	} else | ||||
| 		sgp = scsi_sg_pools + scsi_sgtable_index(cmd->__use_sg); | ||||
| 
 | ||||
| 	sgp = scsi_sg_pools + index; | ||||
| 	mempool_free(sgl, sgp->pool); | ||||
| } | ||||
| 
 | ||||
|  | @ -770,7 +886,7 @@ EXPORT_SYMBOL(scsi_free_sgtable); | |||
| static void scsi_release_buffers(struct scsi_cmnd *cmd) | ||||
| { | ||||
| 	if (cmd->use_sg) | ||||
| 		scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len); | ||||
| 		scsi_free_sgtable(cmd); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Zero these out.  They now point to freed memory, and it is | ||||
|  | @ -984,7 +1100,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
| static int scsi_init_io(struct scsi_cmnd *cmd) | ||||
| { | ||||
| 	struct request     *req = cmd->request; | ||||
| 	struct scatterlist *sgpnt; | ||||
| 	int		   count; | ||||
| 
 | ||||
| 	/*
 | ||||
|  | @ -997,14 +1112,13 @@ static int scsi_init_io(struct scsi_cmnd *cmd) | |||
| 	/*
 | ||||
| 	 * If sg table allocation fails, requeue request later. | ||||
| 	 */ | ||||
| 	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC); | ||||
| 	if (unlikely(!sgpnt)) { | ||||
| 	cmd->request_buffer = scsi_alloc_sgtable(cmd, GFP_ATOMIC); | ||||
| 	if (unlikely(!cmd->request_buffer)) { | ||||
| 		scsi_unprep_request(req); | ||||
| 		return BLKPREP_DEFER; | ||||
| 	} | ||||
| 
 | ||||
| 	req->buffer = NULL; | ||||
| 	cmd->request_buffer = (char *) sgpnt; | ||||
| 	if (blk_pc_request(req)) | ||||
| 		cmd->request_bufflen = req->data_len; | ||||
| 	else | ||||
|  | @ -1529,8 +1643,25 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, | |||
| 	if (!q) | ||||
| 		return NULL; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * this limit is imposed by hardware restrictions | ||||
| 	 */ | ||||
| 	blk_queue_max_hw_segments(q, shost->sg_tablesize); | ||||
| 	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * In the future, sg chaining support will be mandatory and this | ||||
| 	 * ifdef can then go away. Right now we don't have all archs | ||||
| 	 * converted, so better keep it safe. | ||||
| 	 */ | ||||
| #ifdef ARCH_HAS_SG_CHAIN | ||||
| 	if (shost->use_sg_chaining) | ||||
| 		blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS); | ||||
| 	else | ||||
| 		blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS); | ||||
| #else | ||||
| 	blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS); | ||||
| #endif | ||||
| 
 | ||||
| 	blk_queue_max_sectors(q, shost->max_sectors); | ||||
| 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); | ||||
| 	blk_queue_segment_boundary(q, shost->dma_boundary); | ||||
|  | @ -2193,18 +2324,19 @@ EXPORT_SYMBOL_GPL(scsi_target_unblock); | |||
|  * | ||||
|  * Returns virtual address of the start of the mapped page | ||||
|  */ | ||||
| void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count, | ||||
| void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count, | ||||
| 			  size_t *offset, size_t *len) | ||||
| { | ||||
| 	int i; | ||||
| 	size_t sg_len = 0, len_complete = 0; | ||||
| 	struct scatterlist *sg; | ||||
| 	struct page *page; | ||||
| 
 | ||||
| 	WARN_ON(!irqs_disabled()); | ||||
| 
 | ||||
| 	for (i = 0; i < sg_count; i++) { | ||||
| 	for_each_sg(sgl, sg, sg_count, i) { | ||||
| 		len_complete = sg_len; /* Complete sg-entries */ | ||||
| 		sg_len += sg[i].length; | ||||
| 		sg_len += sg->length; | ||||
| 		if (sg_len > *offset) | ||||
| 			break; | ||||
| 	} | ||||
|  | @ -2218,10 +2350,10 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count, | |||
| 	} | ||||
| 
 | ||||
| 	/* Offset starting from the beginning of first page in this sg-entry */ | ||||
| 	*offset = *offset - len_complete + sg[i].offset; | ||||
| 	*offset = *offset - len_complete + sg->offset; | ||||
| 
 | ||||
| 	/* Assumption: contiguous pages can be accessed as "page + i" */ | ||||
| 	page = nth_page(sg[i].page, (*offset >> PAGE_SHIFT)); | ||||
| 	page = nth_page(sg->page, (*offset >> PAGE_SHIFT)); | ||||
| 	*offset &= ~PAGE_MASK; | ||||
| 
 | ||||
| 	/* Bytes in this sg-entry from *offset to the end of the page */ | ||||
|  |  | |||
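The scsi_lib.c rework above is the heart of the series: scsi_alloc_sgtable() now builds one logical table out of pooled tables of at most SCSI_MAX_SG_SEGMENTS entries, spending the last slot of each full table on a link to the next (hence `this = SCSI_MAX_SG_SEGMENTS - 1` for oversized requests), and scsi_free_sgtable() re-walks those links. The link itself is written by sg_chain(), roughly (paraphrased from <linux/scatterlist.h> of this era):

	static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
				    struct scatterlist *sgl)
	{
	#ifndef ARCH_HAS_SG_CHAIN
		BUG();		/* this arch has not been converted yet */
	#endif
		/* overwrite the last entry: the set low bit marks it as a link */
		prv[prv_nents - 1].page = (struct page *)((unsigned long) sgl | 0x01);
	}

The memset() after each mempool_alloc() is load-bearing for the enomem path too: a zeroed trailing entry makes sg_chain_ptr() return NULL, which is what terminates the unwind loop. At the queue level, __scsi_alloc_queue() only raises the physical-segment limit to SCSI_MAX_SG_CHAIN_SEGMENTS when ARCH_HAS_SG_CHAIN and the host's use_sg_chaining flag both agree.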
|  | @ -332,7 +332,7 @@ static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd) | |||
| 	scsi_tgt_uspace_send_status(cmd, tcmd->itn_id, tcmd->tag); | ||||
| 
 | ||||
| 	if (cmd->request_buffer) | ||||
| 		scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len); | ||||
| 		scsi_free_sgtable(cmd); | ||||
| 
 | ||||
| 	queue_work(scsi_tgtd, &tcmd->work); | ||||
| } | ||||
|  | @ -373,7 +373,7 @@ static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask) | |||
| 	} | ||||
| 
 | ||||
| 	eprintk("cmd %p cnt %d\n", cmd, cmd->use_sg); | ||||
| 	scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len); | ||||
| 	scsi_free_sgtable(cmd); | ||||
| 	return -EINVAL; | ||||
| } | ||||
| 
 | ||||
|  |  | |||
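scsi_tgt_lib is the only other user of the sgtable API, and the two hunks above show the shape of the interface change everywhere: callers no longer track a mempool index by hand, because the command itself now records what the free path needs. Roughly:

	/* before: caller had to remember which mempool the table came from */
	scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);

	/* after: cmd->__use_sg, recorded at allocation time, is enough */
	scsi_free_sgtable(cmd);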
|  | @ -1165,7 +1165,7 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type) | |||
| 	sg = rsv_schp->buffer; | ||||
| 	sa = vma->vm_start; | ||||
| 	for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); | ||||
| 	     ++k, ++sg) { | ||||
| 	     ++k, sg = sg_next(sg)) { | ||||
| 		len = vma->vm_end - sa; | ||||
| 		len = (len < sg->length) ? len : sg->length; | ||||
| 		if (offset < len) { | ||||
|  | @ -1209,7 +1209,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma) | |||
| 	sa = vma->vm_start; | ||||
| 	sg = rsv_schp->buffer; | ||||
| 	for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); | ||||
| 	     ++k, ++sg) { | ||||
| 	     ++k, sg = sg_next(sg)) { | ||||
| 		len = vma->vm_end - sa; | ||||
| 		len = (len < sg->length) ? len : sg->length; | ||||
| 		sa += len; | ||||
|  | @ -1840,7 +1840,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) | |||
| 	} | ||||
| 	for (k = 0, sg = schp->buffer, rem_sz = blk_size; | ||||
| 	     (rem_sz > 0) && (k < mx_sc_elems); | ||||
| 	     ++k, rem_sz -= ret_sz, ++sg) { | ||||
| 	     ++k, rem_sz -= ret_sz, sg = sg_next(sg)) { | ||||
| 		 | ||||
| 		num = (rem_sz > scatter_elem_sz_prev) ? | ||||
| 		      scatter_elem_sz_prev : rem_sz; | ||||
|  | @ -1913,7 +1913,7 @@ sg_write_xfer(Sg_request * srp) | |||
| 		if (res) | ||||
| 			return res; | ||||
| 
 | ||||
| 		for (; p; ++sg, ksglen = sg->length, | ||||
| 		for (; p; sg = sg_next(sg), ksglen = sg->length, | ||||
| 		     p = page_address(sg->page)) { | ||||
| 			if (usglen <= 0) | ||||
| 				break; | ||||
|  | @ -1992,7 +1992,7 @@ sg_remove_scat(Sg_scatter_hold * schp) | |||
| 			int k; | ||||
| 
 | ||||
| 			for (k = 0; (k < schp->k_use_sg) && sg->page; | ||||
| 			     ++k, ++sg) { | ||||
| 			     ++k, sg = sg_next(sg)) { | ||||
| 				SCSI_LOG_TIMEOUT(5, printk( | ||||
| 				    "sg_remove_scat: k=%d, pg=0x%p, len=%d\n", | ||||
| 				    k, sg->page, sg->length)); | ||||
|  | @ -2045,7 +2045,7 @@ sg_read_xfer(Sg_request * srp) | |||
| 		if (res) | ||||
| 			return res; | ||||
| 
 | ||||
| 		for (; p; ++sg, ksglen = sg->length, | ||||
| 		for (; p; sg = sg_next(sg), ksglen = sg->length, | ||||
| 		     p = page_address(sg->page)) { | ||||
| 			if (usglen <= 0) | ||||
| 				break; | ||||
|  | @ -2092,7 +2092,7 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) | |||
| 	if ((!outp) || (num_read_xfer <= 0)) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	for (k = 0; (k < schp->k_use_sg) && sg->page; ++k, ++sg) { | ||||
| 	for (k = 0; (k < schp->k_use_sg) && sg->page; ++k, sg = sg_next(sg)) { | ||||
| 		num = sg->length; | ||||
| 		if (num > num_read_xfer) { | ||||
| 			if (__copy_to_user(outp, page_address(sg->page), | ||||
|  | @ -2142,7 +2142,7 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size) | |||
| 	SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size)); | ||||
| 	rem = size; | ||||
| 
 | ||||
| 	for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sg) { | ||||
| 	for (k = 0; k < rsv_schp->k_use_sg; ++k, sg = sg_next(sg)) { | ||||
| 		num = sg->length; | ||||
| 		if (rem <= num) { | ||||
| 			sfp->save_scat_len = num; | ||||
|  |  | |||
|  | @ -1123,6 +1123,7 @@ static struct scsi_host_template driver_template = { | |||
| 	.this_id			= -1, | ||||
| 	.sg_tablesize			= ST_MAX_SG, | ||||
| 	.cmd_per_lun			= ST_CMD_PER_LUN, | ||||
| 	.use_sg_chaining		= ENABLE_SG_CHAINING, | ||||
| }; | ||||
| 
 | ||||
| static int stex_set_dma_mask(struct pci_dev * pdev) | ||||
|  |  | |||
|  | @ -854,5 +854,6 @@ static struct scsi_host_template driver_template = { | |||
| 	.cmd_per_lun =		1, | ||||
| 	.unchecked_isa_dma =	1, | ||||
| 	.use_clustering =	ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining =	ENABLE_SG_CHAINING, | ||||
| }; | ||||
| #include "scsi_module.c" | ||||
|  |  | |||
|  | @ -1808,6 +1808,7 @@ static struct scsi_host_template sym2_template = { | |||
| 	.eh_host_reset_handler	= sym53c8xx_eh_host_reset_handler, | ||||
| 	.this_id		= 7, | ||||
| 	.use_clustering		= ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining	= ENABLE_SG_CHAINING, | ||||
| 	.max_sectors		= 0xFFFF, | ||||
| #ifdef SYM_LINUX_PROC_INFO_SUPPORT | ||||
| 	.proc_info		= sym53c8xx_proc_info, | ||||
|  |  | |||
|  | @ -450,7 +450,8 @@ static struct scsi_host_template driver_template = { | |||
|                 .slave_configure         = u14_34f_slave_configure, | ||||
|                 .this_id                 = 7, | ||||
|                 .unchecked_isa_dma       = 1, | ||||
|                 .use_clustering          = ENABLE_CLUSTERING | ||||
|                 .use_clustering          = ENABLE_CLUSTERING, | ||||
|                 .use_sg_chaining         = ENABLE_SG_CHAINING, | ||||
|                 }; | ||||
| 
 | ||||
| #if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD) | ||||
|  |  | |||
|  | @ -1197,5 +1197,6 @@ static struct scsi_host_template driver_template = { | |||
| 	.cmd_per_lun       = ULTRASTOR_MAX_CMDS_PER_LUN, | ||||
| 	.unchecked_isa_dma = 1, | ||||
| 	.use_clustering    = ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining   = ENABLE_SG_CHAINING, | ||||
| }; | ||||
| #include "scsi_module.c" | ||||
|  |  | |||
|  | @ -1671,6 +1671,7 @@ static struct scsi_host_template driver_template = { | |||
| 	.cmd_per_lun		= 1, | ||||
| 	.unchecked_isa_dma	= 1, | ||||
| 	.use_clustering		= ENABLE_CLUSTERING, | ||||
| 	.use_sg_chaining	= ENABLE_SG_CHAINING, | ||||
| }; | ||||
| 
 | ||||
| #include "scsi_module.c" | ||||
|  |  | |||
|  | @ -798,12 +798,13 @@ static int alauda_read_data(struct us_data *us, unsigned long address, | |||
| { | ||||
| 	unsigned char *buffer; | ||||
| 	u16 lba, max_lba; | ||||
| 	unsigned int page, len, index, offset; | ||||
| 	unsigned int page, len, offset; | ||||
| 	unsigned int blockshift = MEDIA_INFO(us).blockshift; | ||||
| 	unsigned int pageshift = MEDIA_INFO(us).pageshift; | ||||
| 	unsigned int blocksize = MEDIA_INFO(us).blocksize; | ||||
| 	unsigned int pagesize = MEDIA_INFO(us).pagesize; | ||||
| 	unsigned int uzonesize = MEDIA_INFO(us).uzonesize; | ||||
| 	struct scatterlist *sg; | ||||
| 	int result; | ||||
| 
 | ||||
| 	/*
 | ||||
|  | @ -827,7 +828,8 @@ static int alauda_read_data(struct us_data *us, unsigned long address, | |||
| 	max_lba = MEDIA_INFO(us).capacity >> (blockshift + pageshift); | ||||
| 
 | ||||
| 	result = USB_STOR_TRANSPORT_GOOD; | ||||
| 	index = offset = 0; | ||||
| 	offset = 0; | ||||
| 	sg = NULL; | ||||
| 
 | ||||
| 	while (sectors > 0) { | ||||
| 		unsigned int zone = lba / uzonesize; /* integer division */ | ||||
|  | @ -873,7 +875,7 @@ static int alauda_read_data(struct us_data *us, unsigned long address, | |||
| 
 | ||||
| 		/* Store the data in the transfer buffer */ | ||||
| 		usb_stor_access_xfer_buf(buffer, len, us->srb, | ||||
| 				&index, &offset, TO_XFER_BUF); | ||||
| 				&sg, &offset, TO_XFER_BUF); | ||||
| 
 | ||||
| 		page = 0; | ||||
| 		lba++; | ||||
|  | @ -891,11 +893,12 @@ static int alauda_write_data(struct us_data *us, unsigned long address, | |||
| 		unsigned int sectors) | ||||
| { | ||||
| 	unsigned char *buffer, *blockbuffer; | ||||
| 	unsigned int page, len, index, offset; | ||||
| 	unsigned int page, len, offset; | ||||
| 	unsigned int blockshift = MEDIA_INFO(us).blockshift; | ||||
| 	unsigned int pageshift = MEDIA_INFO(us).pageshift; | ||||
| 	unsigned int blocksize = MEDIA_INFO(us).blocksize; | ||||
| 	unsigned int pagesize = MEDIA_INFO(us).pagesize; | ||||
| 	struct scatterlist *sg; | ||||
| 	u16 lba, max_lba; | ||||
| 	int result; | ||||
| 
 | ||||
|  | @ -929,7 +932,8 @@ static int alauda_write_data(struct us_data *us, unsigned long address, | |||
| 	max_lba = MEDIA_INFO(us).capacity >> (pageshift + blockshift); | ||||
| 
 | ||||
| 	result = USB_STOR_TRANSPORT_GOOD; | ||||
| 	index = offset = 0; | ||||
| 	offset = 0; | ||||
| 	sg = NULL; | ||||
| 
 | ||||
| 	while (sectors > 0) { | ||||
| 		/* Write as many sectors as possible in this block */ | ||||
|  | @ -946,7 +950,7 @@ static int alauda_write_data(struct us_data *us, unsigned long address, | |||
| 
 | ||||
| 		/* Get the data from the transfer buffer */ | ||||
| 		usb_stor_access_xfer_buf(buffer, len, us->srb, | ||||
| 				&index, &offset, FROM_XFER_BUF); | ||||
| 				&sg, &offset, FROM_XFER_BUF); | ||||
| 
 | ||||
| 		result = alauda_write_lba(us, lba, page, pages, buffer, | ||||
| 			blockbuffer); | ||||
|  |  | |||
|  | @ -98,7 +98,8 @@ static int datafab_read_data(struct us_data *us, | |||
| 	unsigned char  thistime; | ||||
| 	unsigned int totallen, alloclen; | ||||
| 	int len, result; | ||||
| 	unsigned int sg_idx = 0, sg_offset = 0; | ||||
| 	unsigned int sg_offset = 0; | ||||
| 	struct scatterlist *sg = NULL; | ||||
| 
 | ||||
| 	// we're working in LBA mode.  according to the ATA spec, 
 | ||||
| 	// we can support up to 28-bit addressing.  I don't know if Datafab
 | ||||
|  | @ -155,7 +156,7 @@ static int datafab_read_data(struct us_data *us, | |||
| 
 | ||||
| 		// Store the data in the transfer buffer
 | ||||
| 		usb_stor_access_xfer_buf(buffer, len, us->srb, | ||||
| 				 &sg_idx, &sg_offset, TO_XFER_BUF); | ||||
| 				 &sg, &sg_offset, TO_XFER_BUF); | ||||
| 
 | ||||
| 		sector += thistime; | ||||
| 		totallen -= len; | ||||
|  | @ -181,7 +182,8 @@ static int datafab_write_data(struct us_data *us, | |||
| 	unsigned char thistime; | ||||
| 	unsigned int totallen, alloclen; | ||||
| 	int len, result; | ||||
| 	unsigned int sg_idx = 0, sg_offset = 0; | ||||
| 	unsigned int sg_offset = 0; | ||||
| 	struct scatterlist *sg = NULL; | ||||
| 
 | ||||
| 	// we're working in LBA mode.  according to the ATA spec, 
 | ||||
| 	// we can support up to 28-bit addressing.  I don't know if Datafab
 | ||||
|  | @ -217,7 +219,7 @@ static int datafab_write_data(struct us_data *us, | |||
| 
 | ||||
| 		// Get the data from the transfer buffer
 | ||||
| 		usb_stor_access_xfer_buf(buffer, len, us->srb, | ||||
| 				&sg_idx, &sg_offset, FROM_XFER_BUF); | ||||
| 				&sg, &sg_offset, FROM_XFER_BUF); | ||||
| 
 | ||||
| 		command[0] = 0; | ||||
| 		command[1] = thistime; | ||||
|  |  | |||
|  | @ -119,7 +119,8 @@ static int jumpshot_read_data(struct us_data *us, | |||
| 	unsigned char  thistime; | ||||
| 	unsigned int totallen, alloclen; | ||||
| 	int len, result; | ||||
| 	unsigned int sg_idx = 0, sg_offset = 0; | ||||
| 	unsigned int sg_offset = 0; | ||||
| 	struct scatterlist *sg = NULL; | ||||
| 
 | ||||
| 	// we're working in LBA mode.  according to the ATA spec, 
 | ||||
| 	// we can support up to 28-bit addressing.  I don't know if Jumpshot
 | ||||
|  | @ -170,7 +171,7 @@ static int jumpshot_read_data(struct us_data *us, | |||
| 
 | ||||
| 		// Store the data in the transfer buffer
 | ||||
| 		usb_stor_access_xfer_buf(buffer, len, us->srb, | ||||
| 				 &sg_idx, &sg_offset, TO_XFER_BUF); | ||||
| 				 &sg, &sg_offset, TO_XFER_BUF); | ||||
| 
 | ||||
| 		sector += thistime; | ||||
| 		totallen -= len; | ||||
|  | @ -195,7 +196,8 @@ static int jumpshot_write_data(struct us_data *us, | |||
| 	unsigned char  thistime; | ||||
| 	unsigned int totallen, alloclen; | ||||
| 	int len, result, waitcount; | ||||
| 	unsigned int sg_idx = 0, sg_offset = 0; | ||||
| 	unsigned int sg_offset = 0; | ||||
| 	struct scatterlist *sg = NULL; | ||||
| 
 | ||||
| 	// we're working in LBA mode.  according to the ATA spec, 
 | ||||
| 	// we can support up to 28-bit addressing.  I don't know if Jumpshot
 | ||||
|  | @ -225,7 +227,7 @@ static int jumpshot_write_data(struct us_data *us, | |||
| 
 | ||||
| 		// Get the data from the transfer buffer
 | ||||
| 		usb_stor_access_xfer_buf(buffer, len, us->srb, | ||||
| 				&sg_idx, &sg_offset, FROM_XFER_BUF); | ||||
| 				&sg, &sg_offset, FROM_XFER_BUF); | ||||
| 
 | ||||
| 		command[0] = 0; | ||||
| 		command[1] = thistime; | ||||
|  |  | |||
|  | @ -157,7 +157,7 @@ void usb_stor_transparent_scsi_command(struct scsi_cmnd *srb, | |||
|  * pick up from where this one left off. */ | ||||
| 
 | ||||
| unsigned int usb_stor_access_xfer_buf(unsigned char *buffer, | ||||
| 	unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index, | ||||
| 	unsigned int buflen, struct scsi_cmnd *srb, struct scatterlist **sgptr, | ||||
| 	unsigned int *offset, enum xfer_buf_dir dir) | ||||
| { | ||||
| 	unsigned int cnt; | ||||
|  | @ -184,16 +184,17 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer, | |||
| 	 * located in high memory -- then kmap() will map it to a temporary | ||||
| 	 * position in the kernel's virtual address space. */ | ||||
| 	} else { | ||||
| 		struct scatterlist *sg = | ||||
| 				(struct scatterlist *) srb->request_buffer | ||||
| 				+ *index; | ||||
| 		struct scatterlist *sg = *sgptr; | ||||
| 
 | ||||
| 		if (!sg) | ||||
| 			sg = (struct scatterlist *) srb->request_buffer; | ||||
| 
 | ||||
| 		/* This loop handles a single s-g list entry, which may
 | ||||
| 		 * include multiple pages.  Find the initial page structure | ||||
| 		 * and the starting offset within the page, and update | ||||
| 		 * the *offset and *index values for the next loop. */ | ||||
| 		cnt = 0; | ||||
| 		while (cnt < buflen && *index < srb->use_sg) { | ||||
| 		while (cnt < buflen) { | ||||
| 			struct page *page = sg->page + | ||||
| 					((sg->offset + *offset) >> PAGE_SHIFT); | ||||
| 			unsigned int poff = | ||||
|  | @ -209,8 +210,7 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer, | |||
| 
 | ||||
| 				/* Transfer continues to next s-g entry */ | ||||
| 				*offset = 0; | ||||
| 				++*index; | ||||
| 				++sg; | ||||
| 				sg = sg_next(sg); | ||||
| 			} | ||||
| 
 | ||||
| 			/* Transfer the data for all the pages in this
 | ||||
|  | @ -234,6 +234,7 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer, | |||
| 				sglen -= plen; | ||||
| 			} | ||||
| 		} | ||||
| 		*sgptr = sg; | ||||
| 	} | ||||
| 
 | ||||
| 	/* Return the amount actually transferred */ | ||||
|  | @ -245,9 +246,10 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer, | |||
| void usb_stor_set_xfer_buf(unsigned char *buffer, | ||||
| 	unsigned int buflen, struct scsi_cmnd *srb) | ||||
| { | ||||
| 	unsigned int index = 0, offset = 0; | ||||
| 	unsigned int offset = 0; | ||||
| 	struct scatterlist *sg = NULL; | ||||
| 
 | ||||
| 	usb_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset, | ||||
| 	usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset, | ||||
| 			TO_XFER_BUF); | ||||
| 	if (buflen < srb->request_bufflen) | ||||
| 		srb->resid = srb->request_bufflen - buflen; | ||||
|  |  | |||
|  | @ -52,7 +52,7 @@ extern void usb_stor_transparent_scsi_command(struct scsi_cmnd*, | |||
| enum xfer_buf_dir	{TO_XFER_BUF, FROM_XFER_BUF}; | ||||
| 
 | ||||
| extern unsigned int usb_stor_access_xfer_buf(unsigned char *buffer, | ||||
| 	unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index, | ||||
| 	unsigned int buflen, struct scsi_cmnd *srb, struct scatterlist **, | ||||
| 	unsigned int *offset, enum xfer_buf_dir dir); | ||||
| 
 | ||||
| extern void usb_stor_set_xfer_buf(unsigned char *buffer, | ||||
|  |  | |||
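
The sub-driver conversions surrounding this hunk (alauda, datafab, jumpshot above; sddr09, sddr55, usbat below) all adopt the same caller pattern for the reworked helper: the integer index, which cannot be stepped across a chain boundary, becomes an opaque scatterlist cursor that starts at NULL and is advanced by usb_stor_access_xfer_buf() itself. Condensed to a sketch (hypothetical reader; the media access is elided):

static int example_read_data(struct us_data *us, unsigned int sectors,
			     unsigned int chunk_bytes,
			     unsigned char *buffer)
{
	struct scatterlist *sg = NULL;	/* NULL: start of srb's sg list */
	unsigned int offset = 0;	/* byte offset within current entry */

	while (sectors > 0) {
		/* ... read the next chunk_bytes of the media into buffer ... */

		/* Store the chunk; 'sg' and 'offset' carry the position
		 * forward into the next iteration. */
		usb_stor_access_xfer_buf(buffer, chunk_bytes, us->srb,
					 &sg, &offset, TO_XFER_BUF);
		sectors -= chunk_bytes >> 9;	/* assuming 512-byte sectors */
	}
	return USB_STOR_TRANSPORT_GOOD;
}
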
|  | @ -705,7 +705,8 @@ sddr09_read_data(struct us_data *us, | |||
| 	unsigned char *buffer; | ||||
| 	unsigned int lba, maxlba, pba; | ||||
| 	unsigned int page, pages; | ||||
| 	unsigned int len, index, offset; | ||||
| 	unsigned int len, offset; | ||||
| 	struct scatterlist *sg; | ||||
| 	int result; | ||||
| 
 | ||||
| 	// Figure out the initial LBA and page
 | ||||
|  | @ -730,7 +731,8 @@ sddr09_read_data(struct us_data *us, | |||
| 	// contiguous LBA's. Another exercise left to the student.
 | ||||
| 
 | ||||
| 	result = 0; | ||||
| 	index = offset = 0; | ||||
| 	offset = 0; | ||||
| 	sg = NULL; | ||||
| 
 | ||||
| 	while (sectors > 0) { | ||||
| 
 | ||||
|  | @ -777,7 +779,7 @@ sddr09_read_data(struct us_data *us, | |||
| 
 | ||||
| 		// Store the data in the transfer buffer
 | ||||
| 		usb_stor_access_xfer_buf(buffer, len, us->srb, | ||||
| 				&index, &offset, TO_XFER_BUF); | ||||
| 				&sg, &offset, TO_XFER_BUF); | ||||
| 
 | ||||
| 		page = 0; | ||||
| 		lba++; | ||||
|  | @ -931,7 +933,8 @@ sddr09_write_data(struct us_data *us, | |||
| 	unsigned int pagelen, blocklen; | ||||
| 	unsigned char *blockbuffer; | ||||
| 	unsigned char *buffer; | ||||
| 	unsigned int len, index, offset; | ||||
| 	unsigned int len, offset; | ||||
| 	struct scatterlist *sg; | ||||
| 	int result; | ||||
| 
 | ||||
| 	// Figure out the initial LBA and page
 | ||||
|  | @ -968,7 +971,8 @@ sddr09_write_data(struct us_data *us, | |||
| 	} | ||||
| 
 | ||||
| 	result = 0; | ||||
| 	index = offset = 0; | ||||
| 	offset = 0; | ||||
| 	sg = NULL; | ||||
| 
 | ||||
| 	while (sectors > 0) { | ||||
| 
 | ||||
|  | @ -987,7 +991,7 @@ sddr09_write_data(struct us_data *us, | |||
| 
 | ||||
| 		// Get the data from the transfer buffer
 | ||||
| 		usb_stor_access_xfer_buf(buffer, len, us->srb, | ||||
| 				&index, &offset, FROM_XFER_BUF); | ||||
| 				&sg, &offset, FROM_XFER_BUF); | ||||
| 
 | ||||
| 		result = sddr09_write_lba(us, lba, page, pages, | ||||
| 				buffer, blockbuffer); | ||||
|  |  | |||
|  | @ -167,7 +167,8 @@ static int sddr55_read_data(struct us_data *us, | |||
| 	unsigned long address; | ||||
| 
 | ||||
| 	unsigned short pages; | ||||
| 	unsigned int len, index, offset; | ||||
| 	unsigned int len, offset; | ||||
| 	struct scatterlist *sg; | ||||
| 
 | ||||
| 	// Since we only read in one block at a time, we have to create
 | ||||
| 	// a bounce buffer and move the data a piece at a time between the
 | ||||
|  | @ -178,7 +179,8 @@ static int sddr55_read_data(struct us_data *us, | |||
| 	buffer = kmalloc(len, GFP_NOIO); | ||||
| 	if (buffer == NULL) | ||||
| 		return USB_STOR_TRANSPORT_ERROR; /* out of memory */ | ||||
| 	index = offset = 0; | ||||
| 	offset = 0; | ||||
| 	sg = NULL; | ||||
| 
 | ||||
| 	while (sectors>0) { | ||||
| 
 | ||||
|  | @ -255,7 +257,7 @@ static int sddr55_read_data(struct us_data *us, | |||
| 
 | ||||
| 		// Store the data in the transfer buffer
 | ||||
| 		usb_stor_access_xfer_buf(buffer, len, us->srb, | ||||
| 				&index, &offset, TO_XFER_BUF); | ||||
| 				&sg, &offset, TO_XFER_BUF); | ||||
| 
 | ||||
| 		page = 0; | ||||
| 		lba++; | ||||
|  | @ -287,7 +289,8 @@ static int sddr55_write_data(struct us_data *us, | |||
| 
 | ||||
| 	unsigned short pages; | ||||
| 	int i; | ||||
| 	unsigned int len, index, offset; | ||||
| 	unsigned int len, offset; | ||||
| 	struct scatterlist *sg; | ||||
| 
 | ||||
| 	/* check if we are allowed to write */ | ||||
| 	if (info->read_only || info->force_read_only) { | ||||
|  | @ -304,7 +307,8 @@ static int sddr55_write_data(struct us_data *us, | |||
| 	buffer = kmalloc(len, GFP_NOIO); | ||||
| 	if (buffer == NULL) | ||||
| 		return USB_STOR_TRANSPORT_ERROR; | ||||
| 	index = offset = 0; | ||||
| 	offset = 0; | ||||
| 	sg = NULL; | ||||
| 
 | ||||
| 	while (sectors > 0) { | ||||
| 
 | ||||
|  | @ -322,7 +326,7 @@ static int sddr55_write_data(struct us_data *us, | |||
| 
 | ||||
| 		// Get the data from the transfer buffer
 | ||||
| 		usb_stor_access_xfer_buf(buffer, len, us->srb, | ||||
| 				&index, &offset, FROM_XFER_BUF); | ||||
| 				&sg, &offset, FROM_XFER_BUF); | ||||
| 
 | ||||
| 		US_DEBUGP("Write %02X pages, to PBA %04X" | ||||
| 			" (LBA %04X) page %02X\n", | ||||
|  |  | |||
|  | @ -993,7 +993,8 @@ static int usbat_flash_read_data(struct us_data *us, | |||
| 	unsigned char  thistime; | ||||
| 	unsigned int totallen, alloclen; | ||||
| 	int len, result; | ||||
| 	unsigned int sg_idx = 0, sg_offset = 0; | ||||
| 	unsigned int sg_offset = 0; | ||||
| 	struct scatterlist *sg = NULL; | ||||
| 
 | ||||
| 	result = usbat_flash_check_media(us, info); | ||||
| 	if (result != USB_STOR_TRANSPORT_GOOD) | ||||
|  | @ -1047,7 +1048,7 @@ static int usbat_flash_read_data(struct us_data *us, | |||
| 	 | ||||
| 		/* Store the data in the transfer buffer */ | ||||
| 		usb_stor_access_xfer_buf(buffer, len, us->srb, | ||||
| 					 &sg_idx, &sg_offset, TO_XFER_BUF); | ||||
| 					 &sg, &sg_offset, TO_XFER_BUF); | ||||
| 
 | ||||
| 		sector += thistime; | ||||
| 		totallen -= len; | ||||
|  | @ -1083,7 +1084,8 @@ static int usbat_flash_write_data(struct us_data *us, | |||
| 	unsigned char  thistime; | ||||
| 	unsigned int totallen, alloclen; | ||||
| 	int len, result; | ||||
| 	unsigned int sg_idx = 0, sg_offset = 0; | ||||
| 	unsigned int sg_offset = 0; | ||||
| 	struct scatterlist *sg = NULL; | ||||
| 
 | ||||
| 	result = usbat_flash_check_media(us, info); | ||||
| 	if (result != USB_STOR_TRANSPORT_GOOD) | ||||
|  | @ -1122,7 +1124,7 @@ static int usbat_flash_write_data(struct us_data *us, | |||
| 
 | ||||
| 		/* Get the data from the transfer buffer */ | ||||
| 		usb_stor_access_xfer_buf(buffer, len, us->srb, | ||||
| 					 &sg_idx, &sg_offset, FROM_XFER_BUF); | ||||
| 					 &sg, &sg_offset, FROM_XFER_BUF); | ||||
| 
 | ||||
| 		/* ATA command 0x30 (WRITE SECTORS) */ | ||||
| 		usbat_pack_ata_sector_cmd(command, thistime, sector, 0x30); | ||||
|  | @ -1162,8 +1164,8 @@ static int usbat_hp8200e_handle_read10(struct us_data *us, | |||
| 	unsigned char *buffer; | ||||
| 	unsigned int len; | ||||
| 	unsigned int sector; | ||||
| 	unsigned int sg_segment = 0; | ||||
| 	unsigned int sg_offset = 0; | ||||
| 	struct scatterlist *sg = NULL; | ||||
| 
 | ||||
| 	US_DEBUGP("handle_read10: transfersize %d\n", | ||||
| 		srb->transfersize); | ||||
|  | @ -1220,9 +1222,6 @@ static int usbat_hp8200e_handle_read10(struct us_data *us, | |||
| 	sector |= short_pack(data[7+5], data[7+4]); | ||||
| 	transferred = 0; | ||||
| 
 | ||||
| 	sg_segment = 0; /* for keeping track of where we are in */ | ||||
| 	sg_offset = 0;  /* the scatter/gather list */ | ||||
| 
 | ||||
| 	while (transferred != srb->request_bufflen) { | ||||
| 
 | ||||
| 		if (len > srb->request_bufflen - transferred) | ||||
|  | @ -1255,7 +1254,7 @@ static int usbat_hp8200e_handle_read10(struct us_data *us, | |||
| 
 | ||||
| 		/* Store the data in the transfer buffer */ | ||||
| 		usb_stor_access_xfer_buf(buffer, len, srb, | ||||
| 				 &sg_segment, &sg_offset, TO_XFER_BUF); | ||||
| 				 &sg, &sg_offset, TO_XFER_BUF); | ||||
| 
 | ||||
| 		/* Update the amount transferred and the sector number */ | ||||
| 
 | ||||
|  |  | |||
|  | @ -6,7 +6,7 @@ | |||
|  *	David Mosberger-Tang <davidm@hpl.hp.com> | ||||
|  */ | ||||
| #include <asm/machvec.h> | ||||
| #include <asm/scatterlist.h> | ||||
| #include <linux/scatterlist.h> | ||||
| 
 | ||||
| #define dma_alloc_coherent	platform_dma_alloc_coherent | ||||
| /* coherent mem. is cheap */ | ||||
|  |  | |||
|  | @ -30,4 +30,6 @@ struct scatterlist { | |||
| #define sg_dma_len(sg)		((sg)->dma_length) | ||||
| #define sg_dma_address(sg)	((sg)->dma_address) | ||||
| 
 | ||||
| #define	ARCH_HAS_SG_CHAIN | ||||
| 
 | ||||
| #endif /* _ASM_IA64_SCATTERLIST_H */ | ||||
|  |  | |||
|  | @ -6,149 +6,6 @@ | |||
|  */ | ||||
| #ifndef _ASM_DMA_MAPPING_H | ||||
| #define _ASM_DMA_MAPPING_H | ||||
| #ifdef __KERNEL__ | ||||
| 
 | ||||
| #include <linux/types.h> | ||||
| #include <linux/cache.h> | ||||
| /* need struct page definitions */ | ||||
| #include <linux/mm.h> | ||||
| #include <asm/scatterlist.h> | ||||
| #include <asm/io.h> | ||||
| 
 | ||||
| #define DMA_ERROR_CODE		(~(dma_addr_t)0x0) | ||||
| 
 | ||||
| #ifdef CONFIG_NOT_COHERENT_CACHE | ||||
| /*
 | ||||
|  * DMA-consistent mapping functions for PowerPCs that don't support | ||||
|  * cache snooping.  These allocate/free a region of uncached mapped | ||||
|  * memory space for use with DMA devices.  Alternatively, you could | ||||
|  * allocate the space "normally" and use the cache management functions | ||||
|  * to ensure it is consistent. | ||||
|  */ | ||||
| extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp); | ||||
| extern void __dma_free_coherent(size_t size, void *vaddr); | ||||
| extern void __dma_sync(void *vaddr, size_t size, int direction); | ||||
| extern void __dma_sync_page(struct page *page, unsigned long offset, | ||||
| 				 size_t size, int direction); | ||||
| 
 | ||||
| #else /* ! CONFIG_NOT_COHERENT_CACHE */ | ||||
| /*
 | ||||
|  * Cache coherent cores. | ||||
|  */ | ||||
| 
 | ||||
| #define __dma_alloc_coherent(gfp, size, handle)	NULL | ||||
| #define __dma_free_coherent(size, addr)		((void)0) | ||||
| #define __dma_sync(addr, size, rw)		((void)0) | ||||
| #define __dma_sync_page(pg, off, sz, rw)	((void)0) | ||||
| 
 | ||||
| #endif /* ! CONFIG_NOT_COHERENT_CACHE */ | ||||
| 
 | ||||
| #ifdef CONFIG_PPC64 | ||||
| /*
 | ||||
|  * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO | ||||
|  */ | ||||
| struct dma_mapping_ops { | ||||
| 	void *		(*alloc_coherent)(struct device *dev, size_t size, | ||||
| 				dma_addr_t *dma_handle, gfp_t flag); | ||||
| 	void		(*free_coherent)(struct device *dev, size_t size, | ||||
| 				void *vaddr, dma_addr_t dma_handle); | ||||
| 	dma_addr_t	(*map_single)(struct device *dev, void *ptr, | ||||
| 				size_t size, enum dma_data_direction direction); | ||||
| 	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr, | ||||
| 				size_t size, enum dma_data_direction direction); | ||||
| 	int		(*map_sg)(struct device *dev, struct scatterlist *sg, | ||||
| 				int nents, enum dma_data_direction direction); | ||||
| 	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg, | ||||
| 				int nents, enum dma_data_direction direction); | ||||
| 	int		(*dma_supported)(struct device *dev, u64 mask); | ||||
| 	int		(*set_dma_mask)(struct device *dev, u64 dma_mask); | ||||
| }; | ||||
| 
 | ||||
| static inline struct dma_mapping_ops *get_dma_ops(struct device *dev) | ||||
| { | ||||
| 	/* We don't handle the NULL dev case for ISA for now. We could
 | ||||
| 	 * do it via an out of line call but it is not needed for now. The | ||||
| 	 * only ISA DMA device we support is the floppy and we have a hack | ||||
| 	 * in the floppy driver directly to get a device for us. | ||||
| 	 */ | ||||
| 	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL)) | ||||
| 		return NULL; | ||||
| 	return dev->archdata.dma_ops; | ||||
| } | ||||
| 
 | ||||
| static inline int dma_supported(struct device *dev, u64 mask) | ||||
| { | ||||
| 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | ||||
| 
 | ||||
| 	if (unlikely(dma_ops == NULL)) | ||||
| 		return 0; | ||||
| 	if (dma_ops->dma_supported == NULL) | ||||
| 		return 1; | ||||
| 	return dma_ops->dma_supported(dev, mask); | ||||
| } | ||||
| 
 | ||||
| static inline int dma_set_mask(struct device *dev, u64 dma_mask) | ||||
| { | ||||
| 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | ||||
| 
 | ||||
| 	if (unlikely(dma_ops == NULL)) | ||||
| 		return -EIO; | ||||
| 	if (dma_ops->set_dma_mask != NULL) | ||||
| 		return dma_ops->set_dma_mask(dev, dma_mask); | ||||
| 	if (!dev->dma_mask || !dma_supported(dev, dma_mask)) | ||||
| 		return -EIO; | ||||
| 	*dev->dma_mask = dma_mask; | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| static inline void *dma_alloc_coherent(struct device *dev, size_t size, | ||||
| 				       dma_addr_t *dma_handle, gfp_t flag) | ||||
| { | ||||
| 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | ||||
| 
 | ||||
| 	BUG_ON(!dma_ops); | ||||
| 	return dma_ops->alloc_coherent(dev, size, dma_handle, flag); | ||||
| } | ||||
| 
 | ||||
| static inline void dma_free_coherent(struct device *dev, size_t size, | ||||
| 				     void *cpu_addr, dma_addr_t dma_handle) | ||||
| { | ||||
| 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | ||||
| 
 | ||||
| 	BUG_ON(!dma_ops); | ||||
| 	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); | ||||
| } | ||||
| 
 | ||||
| static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, | ||||
| 					size_t size, | ||||
| 					enum dma_data_direction direction) | ||||
| { | ||||
| 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | ||||
| 
 | ||||
| 	BUG_ON(!dma_ops); | ||||
| 	return dma_ops->map_single(dev, cpu_addr, size, direction); | ||||
| } | ||||
| 
 | ||||
| static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, | ||||
| 				    size_t size, | ||||
| 				    enum dma_data_direction direction) | ||||
| { | ||||
| 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | ||||
| 
 | ||||
| 	BUG_ON(!dma_ops); | ||||
| 	dma_ops->unmap_single(dev, dma_addr, size, direction); | ||||
| } | ||||
| 
 | ||||
| static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||||
| 				      unsigned long offset, size_t size, | ||||
| 				      enum dma_data_direction direction) | ||||
| { | ||||
| 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | ||||
| 
 | ||||
| 	BUG_ON(!dma_ops); | ||||
| 	return dma_ops->map_single(dev, page_address(page) + offset, size, | ||||
| 			direction); | ||||
| } | ||||
| 
 | ||||
| static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, | ||||
| 				  size_t size, | ||||
|  | @ -276,14 +133,15 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, | |||
| } | ||||
| 
 | ||||
| static inline int | ||||
| dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||||
| dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents, | ||||
| 	   enum dma_data_direction direction) | ||||
| { | ||||
| 	struct scatterlist *sg; | ||||
| 	int i; | ||||
| 
 | ||||
| 	BUG_ON(direction == DMA_NONE); | ||||
| 
 | ||||
| 	for (i = 0; i < nents; i++, sg++) { | ||||
| 	for_each_sg(sgl, sg, nents, i) { | ||||
| 		BUG_ON(!sg->page); | ||||
| 		__dma_sync_page(sg->page, sg->offset, sg->length, direction); | ||||
| 		sg->dma_address = page_to_bus(sg->page) + sg->offset; | ||||
|  | @ -318,26 +176,28 @@ static inline void dma_sync_single_for_device(struct device *dev, | |||
| } | ||||
| 
 | ||||
| static inline void dma_sync_sg_for_cpu(struct device *dev, | ||||
| 		struct scatterlist *sg, int nents, | ||||
| 		struct scatterlist *sgl, int nents, | ||||
| 		enum dma_data_direction direction) | ||||
| { | ||||
| 	struct scatterlist *sg; | ||||
| 	int i; | ||||
| 
 | ||||
| 	BUG_ON(direction == DMA_NONE); | ||||
| 
 | ||||
| 	for (i = 0; i < nents; i++, sg++) | ||||
| 	for_each_sg(sgl, sg, nents, i) | ||||
| 		__dma_sync_page(sg->page, sg->offset, sg->length, direction); | ||||
| } | ||||
| 
 | ||||
| static inline void dma_sync_sg_for_device(struct device *dev, | ||||
| 		struct scatterlist *sg, int nents, | ||||
| 		struct scatterlist *sgl, int nents, | ||||
| 		enum dma_data_direction direction) | ||||
| { | ||||
| 	struct scatterlist *sg; | ||||
| 	int i; | ||||
| 
 | ||||
| 	BUG_ON(direction == DMA_NONE); | ||||
| 
 | ||||
| 	for (i = 0; i < nents; i++, sg++) | ||||
| 	for_each_sg(sgl, sg, nents, i) | ||||
| 		__dma_sync_page(sg->page, sg->offset, sg->length, direction); | ||||
| } | ||||
| 
 | ||||
|  |  | |||
|  | @ -41,5 +41,7 @@ struct scatterlist { | |||
| #define ISA_DMA_THRESHOLD	(~0UL) | ||||
| #endif | ||||
| 
 | ||||
| #define ARCH_HAS_SG_CHAIN | ||||
| 
 | ||||
| #endif /* __KERNEL__ */ | ||||
| #endif /* _ASM_POWERPC_SCATTERLIST_H */ | ||||
|  |  | |||
|  | @ -19,4 +19,6 @@ struct scatterlist { | |||
| 
 | ||||
| #define ISA_DMA_THRESHOLD (~0UL) | ||||
| 
 | ||||
| #define ARCH_HAS_SG_CHAIN | ||||
| 
 | ||||
| #endif /* !(_SPARC_SCATTERLIST_H) */ | ||||
|  |  | |||
|  | @ -20,4 +20,6 @@ struct scatterlist { | |||
| 
 | ||||
| #define ISA_DMA_THRESHOLD	(~0UL) | ||||
| 
 | ||||
| #define ARCH_HAS_SG_CHAIN | ||||
| 
 | ||||
| #endif /* !(_SPARC64_SCATTERLIST_H) */ | ||||
|  |  | |||
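
The ARCH_HAS_SG_CHAIN definitions added above (ia64, powerpc, sparc, sparc64) are what gate the larger SCSI_MAX_SG_CHAIN_SEGMENTS limit back in scsi_lib.c, and they also arm the generic splice helper. Roughly — a sketch of the include/linux/scatterlist.h helper of this era, which deliberately BUGs on unconverted architectures — chaining consumes the last entry of the first array to hold the link that sg_next() follows:

static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
			    struct scatterlist *sgl)
{
#ifndef ARCH_HAS_SG_CHAIN
	BUG();
#endif
	/* Low bit set on the page pointer marks this entry as a link. */
	prv[prv_nents - 1].page = (struct page *)((unsigned long) sgl | 0x01);
}
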
|  | @ -2,10 +2,10 @@ | |||
| #define _ASM_I386_DMA_MAPPING_H | ||||
| 
 | ||||
| #include <linux/mm.h> | ||||
| #include <linux/scatterlist.h> | ||||
| 
 | ||||
| #include <asm/cache.h> | ||||
| #include <asm/io.h> | ||||
| #include <asm/scatterlist.h> | ||||
| #include <asm/bug.h> | ||||
| 
 | ||||
| #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | ||||
|  | @ -35,18 +35,19 @@ dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | |||
| } | ||||
| 
 | ||||
| static inline int | ||||
| dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||||
| dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | ||||
| 	   enum dma_data_direction direction) | ||||
| { | ||||
| 	struct scatterlist *sg; | ||||
| 	int i; | ||||
| 
 | ||||
| 	BUG_ON(!valid_dma_direction(direction)); | ||||
| 	WARN_ON(nents == 0 || sg[0].length == 0); | ||||
| 	WARN_ON(nents == 0 || sglist[0].length == 0); | ||||
| 
 | ||||
| 	for (i = 0; i < nents; i++ ) { | ||||
| 		BUG_ON(!sg[i].page); | ||||
| 	for_each_sg(sglist, sg, nents, i) { | ||||
| 		BUG_ON(!sg->page); | ||||
| 
 | ||||
| 		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset; | ||||
| 		sg->dma_address = page_to_phys(sg->page) + sg->offset; | ||||
| 	} | ||||
| 
 | ||||
| 	flush_write_buffers(); | ||||
|  |  | |||
|  | @ -6,8 +6,7 @@ | |||
|  * documentation. | ||||
|  */ | ||||
| 
 | ||||
| 
 | ||||
| #include <asm/scatterlist.h> | ||||
| #include <linux/scatterlist.h> | ||||
| #include <asm/io.h> | ||||
| #include <asm/swiotlb.h> | ||||
| 
 | ||||
|  |  | |||