/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

static unsigned int write_cluster_size = 64;
module_param(write_cluster_size, uint, S_IRUGO);
MODULE_PARM_DESC(write_cluster_size,
		 "Number of pages used for contiguous writes.");

#define CLUSTER_SIZE (write_cluster_size * PAGE_SIZE)
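/*
 * Worked example: with the default write_cluster_size of 64 and the 4 KiB
 * pages used on s390, CLUSTER_SIZE is 64 * 4096 = 256 KiB. The per-request
 * bounce buffer allocated below covers two such clusters.
 */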

void __scm_free_rq_cluster(struct scm_request *scmrq)
{
	int i;

	if (!scmrq->cluster.buf)
		return;

	for (i = 0; i < 2 * write_cluster_size; i++)
		free_page((unsigned long) scmrq->cluster.buf[i]);

	kfree(scmrq->cluster.buf);
}
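
/*
 * A request needs up to 2 * write_cluster_size bounce pages: in the worst
 * case a write straddles a cluster boundary and both adjacent clusters
 * have to be read and rewritten in full.
 */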
int __scm_alloc_rq_cluster(struct scm_request *scmrq)
{
	int i;

	scmrq->cluster.buf = kzalloc(sizeof(void *) * 2 * write_cluster_size,
				 GFP_KERNEL);
	if (!scmrq->cluster.buf)
		return -ENOMEM;

	for (i = 0; i < 2 * write_cluster_size; i++) {
		scmrq->cluster.buf[i] = (void *) get_zeroed_page(GFP_DMA);
		if (!scmrq->cluster.buf[i])
			return -ENOMEM;
	}
	INIT_LIST_HEAD(&scmrq->cluster.list);
	return 0;
}

void scm_request_cluster_init(struct scm_request *scmrq)
{
	scmrq->cluster.state = CLUSTER_NONE;
}
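
/*
 * blk_rq_pos() counts 512-byte sectors, hence the << 9 to get byte
 * offsets; two requests collide iff their cluster index intervals
 * [first, last] overlap.
 */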
static bool clusters_intersect(struct request *A, struct request *B)
{
	unsigned long firstA, lastA, firstB, lastB;

	firstA = ((u64) blk_rq_pos(A) << 9) / CLUSTER_SIZE;
	lastA = (((u64) blk_rq_pos(A) << 9) +
		    blk_rq_bytes(A) - 1) / CLUSTER_SIZE;

	firstB = ((u64) blk_rq_pos(B) << 9) / CLUSTER_SIZE;
	lastB = (((u64) blk_rq_pos(B) << 9) +
		    blk_rq_bytes(B) - 1) / CLUSTER_SIZE;

	return (firstB <= lastA && firstA <= lastB);
}
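
/*
 * Reserve the clusters the current request touches. Concurrent requests
 * may share a cluster only if all of them are reads: a clustered write is
 * a non-atomic read-modify-write of the whole cluster.
 */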
bool scm_reserve_cluster(struct scm_request *scmrq)
{
	struct request *req = scmrq->request[scmrq->aob->request.msb_count];
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_request *iter;
	int pos, add = 1;

	if (write_cluster_size == 0)
		return true;

	spin_lock(&bdev->lock);
	list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
		if (iter == scmrq) {
			/*
			 * We don't have to use clusters_intersect here, since
			 * cluster requests are always started separately.
			 */
			add = 0;
			continue;
		}
		/* iter->request[] holds exactly msb_count valid entries. */
		for (pos = 0; pos < iter->aob->request.msb_count; pos++) {
			if (clusters_intersect(req, iter->request[pos]) &&
			    (rq_data_dir(req) == WRITE ||
			     rq_data_dir(iter->request[pos]) == WRITE)) {
				spin_unlock(&bdev->lock);
				return false;
			}
		}
	}
	if (add)
		list_add(&scmrq->cluster.list, &bdev->cluster_list);
	spin_unlock(&bdev->lock);

	return true;
}

void scm_release_cluster(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	if (write_cluster_size == 0)
		return;

	spin_lock_irqsave(&bdev->lock, flags);
	list_del(&scmrq->cluster.list);
	spin_unlock_irqrestore(&bdev->lock, flags);
}

void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
{
	INIT_LIST_HEAD(&bdev->cluster_list);
	blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
}
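
/*
 * Build the AOB for the current step of the read-modify-write cycle:
 * CLUSTER_READ fetches the affected cluster(s) into the bounce pages;
 * CLUSTER_WRITE reuses the aidaw list, replacing the bounce pages that
 * cover the request's own range with the payload pages.
 */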
static int scm_prepare_cluster_request(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	struct request *req = scmrq->request[0];
	struct msb *msb = &scmrq->aob->msb[0];
	struct req_iterator iter;
	struct aidaw *aidaw;
	struct bio_vec bv;
	int i = 0;
	u64 addr;

	switch (scmrq->cluster.state) {
	case CLUSTER_NONE:
		scmrq->cluster.state = CLUSTER_READ;
		/* fall through */
	case CLUSTER_READ:
		msb->bs = MSB_BS_4K;
		msb->oc = MSB_OC_READ;
		msb->flags = MSB_FLAG_IDA;
		msb->blk_count = write_cluster_size;

		addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
		msb->scm_addr = round_down(addr, CLUSTER_SIZE);

		if (msb->scm_addr !=
		    round_down(addr + (u64) blk_rq_bytes(req) - 1,
			       CLUSTER_SIZE))
			msb->blk_count = 2 * write_cluster_size;

		aidaw = scm_aidaw_fetch(scmrq, msb->blk_count * PAGE_SIZE);
		if (!aidaw)
			return -ENOMEM;

		scmrq->aob->request.msb_count = 1;
		msb->data_addr = (u64) aidaw;
		for (i = 0; i < msb->blk_count; i++) {
			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
			aidaw++;
		}

		break;
	case CLUSTER_WRITE:
		aidaw = (void *) msb->data_addr;
		msb->oc = MSB_OC_WRITE;

		for (addr = msb->scm_addr;
		     addr < scmdev->address + ((u64) blk_rq_pos(req) << 9);
		     addr += PAGE_SIZE) {
			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
			aidaw++;
			i++;
		}
		rq_for_each_segment(bv, req, iter) {
			aidaw->data_addr = (u64) page_address(bv.bv_page);
			aidaw++;
			i++;
		}
		for (; i < msb->blk_count; i++) {
			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
			aidaw++;
		}
		break;
	}
	return 0;
}
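
/*
 * Only writes smaller than a full cluster need the read-modify-write
 * detour; reads, and writes of at least CLUSTER_SIZE bytes, are issued
 * directly.
 */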
bool scm_need_cluster_request(struct scm_request *scmrq)
{
	int pos = scmrq->aob->request.msb_count;

	if (rq_data_dir(scmrq->request[pos]) == READ)
		return false;

	return blk_rq_bytes(scmrq->request[pos]) < CLUSTER_SIZE;
}

/* Called with queue lock held. */
void scm_initiate_cluster_request(struct scm_request *scmrq)
{
	if (scm_prepare_cluster_request(scmrq))
		goto requeue;
	if (eadm_start_aob(scmrq->aob))
		goto requeue;
	return;
requeue:
	scm_request_requeue(scmrq);
}

bool scm_test_cluster_request(struct scm_request *scmrq)
{
	return scmrq->cluster.state != CLUSTER_NONE;
}
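
/*
 * Completion handler for clustered requests: a successful CLUSTER_READ is
 * restarted as CLUSTER_WRITE under the request queue lock; a finished
 * CLUSTER_WRITE (or a failed read) completes the request.
 */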
void scm_cluster_request_irq(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	switch (scmrq->cluster.state) {
	case CLUSTER_NONE:
		BUG();
		break;
	case CLUSTER_READ:
		if (scmrq->error) {
			scm_request_finish(scmrq);
			break;
		}
		scmrq->cluster.state = CLUSTER_WRITE;
		spin_lock_irqsave(&bdev->rq_lock, flags);
		scm_initiate_cluster_request(scmrq);
		spin_unlock_irqrestore(&bdev->rq_lock, flags);
		break;
	case CLUSTER_WRITE:
		scm_request_finish(scmrq);
		break;
	}
}
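
/*
 * Valid settings are 0 (clustering disabled) or a power of two between
 * 2 and 128; e.g. the default of 64 passes, 96 fails the power-of-two
 * check below.
 */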
bool scm_cluster_size_valid(void)
{
	if (write_cluster_size == 1 || write_cluster_size > 128)
		return false;

	return !(write_cluster_size & (write_cluster_size - 1));
}