NVMe: Fix double free irq
Sets the vector to an invalid value after it's freed so we don't free it twice.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
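For context on the pattern the diff below implements: the completion-queue vector is read and invalidated under the queue lock, so a second suspend call sees the -1 sentinel and bails out before free_irq() could run again. The following sketch is a stand-alone, user-space illustration of that idea only; the fake_queue struct, the fake_free_irq() stub, and the pthread mutex standing in for q_lock are assumptions made for the example, not the driver's actual code.

/*
 * Simplified sketch of the "invalidate the vector so we never free it
 * twice" pattern. Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct fake_queue {
	pthread_mutex_t q_lock;
	int cq_vector;		/* -1 means "already suspended / IRQ freed" */
};

static void fake_free_irq(int vector)
{
	printf("free_irq(%d)\n", vector);
}

/* Returns 1 if the queue was already suspended, 0 if we freed the IRQ. */
static int suspend_queue(struct fake_queue *q)
{
	int vector;

	pthread_mutex_lock(&q->q_lock);
	if (q->cq_vector == -1) {		/* second caller: nothing to free */
		pthread_mutex_unlock(&q->q_lock);
		return 1;
	}
	vector = q->cq_vector;
	q->cq_vector = -1;			/* mark freed before dropping the lock */
	pthread_mutex_unlock(&q->q_lock);

	fake_free_irq(vector);			/* only the first caller reaches this */
	return 0;
}

int main(void)
{
	struct fake_queue q = { .q_lock = PTHREAD_MUTEX_INITIALIZER, .cq_vector = 5 };

	suspend_queue(&q);	/* frees vector 5 */
	suspend_queue(&q);	/* no-op, returns 1 instead of double-freeing */
	return 0;
}

The key design choice, mirrored from the diff, is that the vector is read and set to -1 inside the same locked region, so the free call can only ever happen once no matter how many times suspend is invoked.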
parent b4c6a02877
commit 2b25d98179

1 changed file with 12 additions and 5 deletions
@@ -1131,10 +1131,16 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
  */
 static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 {
-	int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
+	int vector;
 
 	spin_lock_irq(&nvmeq->q_lock);
+	if (nvmeq->cq_vector == -1) {
+		spin_unlock_irq(&nvmeq->q_lock);
+		return 1;
+	}
+	vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
 	nvmeq->dev->online_queues--;
+	nvmeq->cq_vector = -1;
 	spin_unlock_irq(&nvmeq->q_lock);
 
 	irq_set_affinity_hint(vector, NULL);
@@ -1173,7 +1179,7 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
 }
 
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
-							int depth, int vector)
+							int depth)
 {
 	struct device *dmadev = &dev->pci_dev->dev;
 	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
@@ -1199,7 +1205,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 	nvmeq->cq_phase = 1;
 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
 	nvmeq->q_depth = depth;
-	nvmeq->cq_vector = vector;
 	nvmeq->qid = qid;
 	dev->queue_count++;
 	dev->queues[qid] = nvmeq;
@@ -1244,6 +1249,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
 	struct nvme_dev *dev = nvmeq->dev;
 	int result;
 
+	nvmeq->cq_vector = qid - 1;
 	result = adapter_alloc_cq(dev, qid, nvmeq);
 	if (result < 0)
 		return result;
@@ -1416,7 +1422,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 
 	nvmeq = dev->queues[0];
 	if (!nvmeq) {
-		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 0);
+		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
 		if (!nvmeq)
 			return -ENOMEM;
 	}
@@ -1443,6 +1449,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	if (result)
 		goto free_nvmeq;
 
+	nvmeq->cq_vector = 0;
 	result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
 	if (result)
 		goto free_tags;
@@ -1944,7 +1951,7 @@ static void nvme_create_io_queues(struct nvme_dev *dev)
 	unsigned i;
 
 	for (i = dev->queue_count; i <= dev->max_qid; i++)
-		if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1))
+		if (!nvme_alloc_queue(dev, i, dev->q_depth))
			break;
 
 	for (i = dev->online_queues; i <= dev->queue_count - 1; i++)