Merge tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux

Pull virtio update from Rusty Russell:
 "Some nice cleanups, and even a patch my wife did as a "live" demo for
  Latinoware 2012.

  There's a slightly non-trivial merge in virtio-net, as we cleaned up
  the virtio add_buf interface while DaveM accepted the mq virtio-net
  patches.

  You can see my solution in my pending-rebases branch, if that helps,
  but I know you love merging:
  https://git.kernel.org/?p=linux/kernel/git/rusty/linux.git;a=commit;h=12e4e64fa66a4c812e4855de32abdb4d819526fe

  Cheers, Rusty"

* tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux: (27 commits)
  virtio_console: Add support for remoteproc serial
  virtio_console: Merge struct buffer_token into struct port_buffer
  virtio: add drv_to_virtio to make code clearly
  virtio: use dev_to_virtio wrapper in virtio
  virtio-mmio: Fix irq parsing in command line parameter
  virtio_console: Free buffers from out-queue upon close
  virtio: Convert dev_printk(KERN_<LEVEL> to dev_<level>(
  virtio_console: Use kmalloc instead of kzalloc
  virtio_console: Free buffer if splice fails
  virtio: tools: make it clear that virtqueue_add_buf() no longer returns > 0
  virtio: scsi: make it clear that virtqueue_add_buf() no longer returns > 0
  virtio: rpmsg: make it clear that virtqueue_add_buf() no longer returns > 0
  virtio: net: make it clear that virtqueue_add_buf() no longer returns > 0
  virtio: console: make it clear that virtqueue_add_buf() no longer returns > 0
  virtio: make virtqueue_add_buf() returning 0 on success, not capacity.
  virtio: console: don't rely on virtqueue_add_buf() returning capacity.
  virtio_net: don't rely on virtqueue_add_buf() returning capacity.
  virtio-net: remove unused skb_vnet_hdr->num_sg field
  virtio-net: correct capacity math on ring full
  virtio: move queue_index and num_free fields into core struct virtqueue.
  ...
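The API change at the heart of this series: virtqueue_add_buf() previously returned the remaining capacity of the ring (or a negative error), and callers used that value for flow control. After this merge it returns 0 on success, and remaining capacity is read from the new num_free field in struct virtqueue. A minimal sketch of a post-merge caller (send_one() is a hypothetical helper, not part of the patches; it mirrors the __send_to_port() and start_xmit() hunks below):

	/* Sketch only: caller adapted to the new virtqueue_add_buf() contract. */
	static bool send_one(struct virtqueue *out_vq, struct scatterlist *sg,
			     int nents, void *data)
	{
		/* Old contract: ret >= 0 meant "queued, ret slots left",
		 * with ret == 0 meaning the ring is now full; ret < 0 was
		 * an error. */
		int err = virtqueue_add_buf(out_vq, sg, nents, 0, data,
					    GFP_ATOMIC);

		virtqueue_kick(out_vq);

		/* New contract: 0 on success, -ENOSPC/-ENOMEM on failure;
		 * capacity is read from the virtqueue itself instead. */
		if (err)
			return false;		/* nothing was queued */
		return out_vq->num_free > 0;	/* ring still has room */
	}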
commit b7dfde956d

17 changed files with 411 additions and 275 deletions
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
@@ -37,8 +37,12 @@
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 #include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/kconfig.h>
 #include "../tty/hvc/hvc_console.h"
 
+#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
+
 /*
  * This is a global struct for storing common data for all the devices
  * this driver handles.
@@ -111,6 +115,21 @@ struct port_buffer {
 	size_t len;
 	/* offset in the buf from which to consume data */
 	size_t offset;
+
+	/* DMA address of buffer */
+	dma_addr_t dma;
+
+	/* Device we got DMA memory from */
+	struct device *dev;
+
+	/* List of pending dma buffers to free */
+	struct list_head list;
+
+	/* If sgpages == 0 then buf is used */
+	unsigned int sgpages;
+
+	/* sg is used if sgpages > 0. sg must be the last field in this struct */
+	struct scatterlist sg[0];
 };
 
 /*
@@ -325,6 +344,11 @@ static bool is_console_port(struct port *port)
 	return false;
 }
 
+static bool is_rproc_serial(const struct virtio_device *vdev)
+{
+	return is_rproc_enabled && vdev->id.device == VIRTIO_ID_RPROC_SERIAL;
+}
+
 static inline bool use_multiport(struct ports_device *portdev)
 {
 	/*
@@ -336,20 +360,110 @@ static inline bool use_multiport(struct ports_device *portdev)
 	return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT);
 }
 
-static void free_buf(struct port_buffer *buf)
+static DEFINE_SPINLOCK(dma_bufs_lock);
+static LIST_HEAD(pending_free_dma_bufs);
+
+static void free_buf(struct port_buffer *buf, bool can_sleep)
 {
-	kfree(buf->buf);
+	unsigned int i;
+
+	for (i = 0; i < buf->sgpages; i++) {
+		struct page *page = sg_page(&buf->sg[i]);
+		if (!page)
+			break;
+		put_page(page);
+	}
+
+	if (!buf->dev) {
+		kfree(buf->buf);
+	} else if (is_rproc_enabled) {
+		unsigned long flags;
+
+		/* dma_free_coherent requires interrupts to be enabled. */
+		if (!can_sleep) {
+			/* queue up dma-buffers to be freed later */
+			spin_lock_irqsave(&dma_bufs_lock, flags);
+			list_add_tail(&buf->list, &pending_free_dma_bufs);
+			spin_unlock_irqrestore(&dma_bufs_lock, flags);
+			return;
+		}
+		dma_free_coherent(buf->dev, buf->size, buf->buf, buf->dma);
+
+		/* Release device refcnt and allow it to be freed */
+		put_device(buf->dev);
+	}
+
 	kfree(buf);
 }
 
-static struct port_buffer *alloc_buf(size_t buf_size)
+static void reclaim_dma_bufs(void)
+{
+	unsigned long flags;
+	struct port_buffer *buf, *tmp;
+	LIST_HEAD(tmp_list);
+
+	if (list_empty(&pending_free_dma_bufs))
+		return;
+
+	/* Create a copy of the pending_free_dma_bufs while holding the lock */
+	spin_lock_irqsave(&dma_bufs_lock, flags);
+	list_cut_position(&tmp_list, &pending_free_dma_bufs,
+			  pending_free_dma_bufs.prev);
+	spin_unlock_irqrestore(&dma_bufs_lock, flags);
+
+	/* Release the dma buffers, without irqs enabled */
+	list_for_each_entry_safe(buf, tmp, &tmp_list, list) {
+		list_del(&buf->list);
+		free_buf(buf, true);
+	}
+}
+
+static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
+				     int pages)
 {
 	struct port_buffer *buf;
 
-	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+	reclaim_dma_bufs();
+
+	/*
+	 * Allocate buffer and the sg list. The sg list array is allocated
+	 * directly after the port_buffer struct.
+	 */
+	buf = kmalloc(sizeof(*buf) + sizeof(struct scatterlist) * pages,
+		      GFP_KERNEL);
 	if (!buf)
 		goto fail;
-	buf->buf = kzalloc(buf_size, GFP_KERNEL);
+
+	buf->sgpages = pages;
+	if (pages > 0) {
+		buf->dev = NULL;
+		buf->buf = NULL;
+		return buf;
+	}
+
+	if (is_rproc_serial(vq->vdev)) {
+		/*
+		 * Allocate DMA memory from ancestor. When a virtio
+		 * device is created by remoteproc, the DMA memory is
+		 * associated with the grandparent device:
+		 * vdev => rproc => platform-dev.
+		 * The code here would have been less quirky if
+		 * DMA_MEMORY_INCLUDES_CHILDREN had been supported
+		 * in dma-coherent.c
+		 */
+		if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent)
+			goto free_buf;
+		buf->dev = vq->vdev->dev.parent->parent;
+
+		/* Increase device refcnt to avoid freeing it */
+		get_device(buf->dev);
+		buf->buf = dma_alloc_coherent(buf->dev, buf_size, &buf->dma,
+					      GFP_KERNEL);
+	} else {
+		buf->dev = NULL;
+		buf->buf = kmalloc(buf_size, GFP_KERNEL);
+	}
+
 	if (!buf->buf)
 		goto free_buf;
 	buf->len = 0;
@@ -396,6 +510,8 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
 
 	ret = virtqueue_add_buf(vq, sg, 0, 1, buf, GFP_ATOMIC);
 	virtqueue_kick(vq);
+	if (!ret)
+		ret = vq->num_free;
 	return ret;
 }
 
@@ -416,7 +532,7 @@ static void discard_port_data(struct port *port)
 		port->stats.bytes_discarded += buf->len - buf->offset;
 		if (add_inbuf(port->in_vq, buf) < 0) {
 			err++;
-			free_buf(buf);
+			free_buf(buf, false);
 		}
 		port->inbuf = NULL;
 		buf = get_inbuf(port);
@@ -459,7 +575,7 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
 	vq = portdev->c_ovq;
 
 	sg_init_one(sg, &cpkt, sizeof(cpkt));
-	if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) >= 0) {
+	if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) {
 		virtqueue_kick(vq);
 		while (!virtqueue_get_buf(vq, &len))
 			cpu_relax();
@@ -476,55 +592,29 @@ static ssize_t send_control_msg(struct port *port, unsigned int event,
 	return 0;
 }
 
-struct buffer_token {
-	union {
-		void *buf;
-		struct scatterlist *sg;
-	} u;
-	/* If sgpages == 0 then buf is used, else sg is used */
-	unsigned int sgpages;
-};
-
-static void reclaim_sg_pages(struct scatterlist *sg, unsigned int nrpages)
-{
-	int i;
-	struct page *page;
-
-	for (i = 0; i < nrpages; i++) {
-		page = sg_page(&sg[i]);
-		if (!page)
-			break;
-		put_page(page);
-	}
-	kfree(sg);
-}
-
 /* Callers must take the port->outvq_lock */
 static void reclaim_consumed_buffers(struct port *port)
 {
-	struct buffer_token *tok;
+	struct port_buffer *buf;
 	unsigned int len;
 
 	if (!port->portdev) {
 		/* Device has been unplugged.  vqs are already gone. */
 		return;
 	}
-	while ((tok = virtqueue_get_buf(port->out_vq, &len))) {
-		if (tok->sgpages)
-			reclaim_sg_pages(tok->u.sg, tok->sgpages);
-		else
-			kfree(tok->u.buf);
-		kfree(tok);
+	while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
+		free_buf(buf, false);
 		port->outvq_full = false;
 	}
 }
 
 static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
 			      int nents, size_t in_count,
-			      struct buffer_token *tok, bool nonblock)
+			      void *data, bool nonblock)
 {
 	struct virtqueue *out_vq;
-	ssize_t ret;
+	int err;
 	unsigned long flags;
 	unsigned int len;
@@ -534,17 +624,17 @@ static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
 
 	reclaim_consumed_buffers(port);
 
-	ret = virtqueue_add_buf(out_vq, sg, nents, 0, tok, GFP_ATOMIC);
+	err = virtqueue_add_buf(out_vq, sg, nents, 0, data, GFP_ATOMIC);
 
 	/* Tell Host to go! */
 	virtqueue_kick(out_vq);
 
-	if (ret < 0) {
+	if (err) {
 		in_count = 0;
 		goto done;
 	}
 
-	if (ret == 0)
+	if (out_vq->num_free == 0)
 		port->outvq_full = true;
 
 	if (nonblock)
@@ -572,37 +662,6 @@ done:
 	return in_count;
 }
 
-static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
-			bool nonblock)
-{
-	struct scatterlist sg[1];
-	struct buffer_token *tok;
-
-	tok = kmalloc(sizeof(*tok), GFP_ATOMIC);
-	if (!tok)
-		return -ENOMEM;
-	tok->sgpages = 0;
-	tok->u.buf = in_buf;
-
-	sg_init_one(sg, in_buf, in_count);
-
-	return __send_to_port(port, sg, 1, in_count, tok, nonblock);
-}
-
-static ssize_t send_pages(struct port *port, struct scatterlist *sg, int nents,
-			  size_t in_count, bool nonblock)
-{
-	struct buffer_token *tok;
-
-	tok = kmalloc(sizeof(*tok), GFP_ATOMIC);
-	if (!tok)
-		return -ENOMEM;
-	tok->sgpages = nents;
-	tok->u.sg = sg;
-
-	return __send_to_port(port, sg, nents, in_count, tok, nonblock);
-}
-
 /*
  * Give out the data that's requested from the buffer that we have
  * queued up.
@@ -748,9 +807,10 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
 			       size_t count, loff_t *offp)
 {
 	struct port *port;
-	char *buf;
+	struct port_buffer *buf;
 	ssize_t ret;
 	bool nonblock;
+	struct scatterlist sg[1];
 
 	/* Userspace could be out to fool us */
 	if (!count)
@@ -766,11 +826,11 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
 
 	count = min((size_t)(32 * 1024), count);
 
-	buf = kmalloc(count, GFP_KERNEL);
+	buf = alloc_buf(port->out_vq, count, 0);
 	if (!buf)
 		return -ENOMEM;
 
-	ret = copy_from_user(buf, ubuf, count);
+	ret = copy_from_user(buf->buf, ubuf, count);
 	if (ret) {
 		ret = -EFAULT;
 		goto free_buf;
@@ -784,13 +844,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
 	 * through to the host.
 	 */
 	nonblock = true;
-	ret = send_buf(port, buf, count, nonblock);
+	sg_init_one(sg, buf->buf, count);
+	ret = __send_to_port(port, sg, 1, count, buf, nonblock);
 
 	if (nonblock && ret > 0)
 		goto out;
 
 free_buf:
-	kfree(buf);
+	free_buf(buf, true);
 out:
 	return ret;
 }
@@ -856,6 +917,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
 	struct port *port = filp->private_data;
 	struct sg_list sgl;
 	ssize_t ret;
+	struct port_buffer *buf;
 	struct splice_desc sd = {
 		.total_len = len,
 		.flags = flags,
@@ -863,22 +925,34 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
 		.u.data = &sgl,
 	};
 
+	/*
+	 * Rproc_serial does not yet support splice. To support splice
+	 * pipe_to_sg() must allocate dma-buffers and copy content from
+	 * regular pages to dma pages. And alloc_buf and free_buf must
+	 * support allocating and freeing such a list of dma-buffers.
+	 */
+	if (is_rproc_serial(port->out_vq->vdev))
+		return -EINVAL;
+
 	ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
 	if (ret < 0)
 		return ret;
 
+	buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
+	if (!buf)
+		return -ENOMEM;
+
 	sgl.n = 0;
 	sgl.len = 0;
 	sgl.size = pipe->nrbufs;
-	sgl.sg = kmalloc(sizeof(struct scatterlist) * sgl.size, GFP_KERNEL);
-	if (unlikely(!sgl.sg))
-		return -ENOMEM;
-
+	sgl.sg = buf->sg;
 	sg_init_table(sgl.sg, sgl.size);
 	ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
 	if (likely(ret > 0))
-		ret = send_pages(port, sgl.sg, sgl.n, sgl.len, true);
+		ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);
 
+	if (unlikely(ret <= 0))
+		free_buf(buf, true);
 	return ret;
 }
@@ -927,6 +1001,7 @@ static int port_fops_release(struct inode *inode, struct file *filp)
 	reclaim_consumed_buffers(port);
 	spin_unlock_irq(&port->outvq_lock);
 
+	reclaim_dma_bufs();
 	/*
 	 * Locks aren't necessary here as a port can't be opened after
 	 * unplug, and if a port isn't unplugged, a kref would already
@@ -1031,6 +1106,7 @@ static const struct file_operations port_fops = {
 static int put_chars(u32 vtermno, const char *buf, int count)
 {
 	struct port *port;
+	struct scatterlist sg[1];
 
 	if (unlikely(early_put_chars))
 		return early_put_chars(vtermno, buf, count);
@@ -1039,7 +1115,8 @@ static int put_chars(u32 vtermno, const char *buf, int count)
 	if (!port)
 		return -EPIPE;
 
-	return send_buf(port, (void *)buf, count, false);
+	sg_init_one(sg, buf, count);
+	return __send_to_port(port, sg, 1, count, (void *)buf, false);
 }
 
 /*
@@ -1076,7 +1153,10 @@ static void resize_console(struct port *port)
 		return;
 
 	vdev = port->portdev->vdev;
-	if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
+
+	/* Don't test F_SIZE at all if we're rproc: not a valid feature! */
+	if (!is_rproc_serial(vdev) &&
+	    virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
 		hvc_resize(port->cons.hvc, port->cons.ws);
 }
 
@@ -1260,7 +1340,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
 
 	nr_added_bufs = 0;
 	do {
-		buf = alloc_buf(PAGE_SIZE);
+		buf = alloc_buf(vq, PAGE_SIZE, 0);
 		if (!buf)
 			break;
 
@@ -1268,7 +1348,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
 		ret = add_inbuf(vq, buf);
 		if (ret < 0) {
 			spin_unlock_irq(lock);
-			free_buf(buf);
+			free_buf(buf, true);
 			break;
 		}
 		nr_added_bufs++;
@@ -1356,10 +1436,18 @@ static int add_port(struct ports_device *portdev, u32 id)
 		goto free_device;
 	}
 
+	if (is_rproc_serial(port->portdev->vdev))
+		/*
+		 * For rproc_serial assume remote processor is connected.
+		 * rproc_serial does not want the console port, only
+		 * the generic port implementation.
+		 */
+		port->host_connected = true;
+	else if (!use_multiport(port->portdev)) {
 		/*
-	 * If we're not using multiport support, this has to be a console port
+		 * If we're not using multiport support,
+		 * this has to be a console port.
 		 */
-	if (!use_multiport(port->portdev)) {
 		err = init_port_console(port);
 		if (err)
 			goto free_inbufs;
@@ -1392,7 +1480,7 @@ static int add_port(struct ports_device *portdev, u32 id)
 
 free_inbufs:
 	while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
-		free_buf(buf);
+		free_buf(buf, true);
free_device:
 	device_destroy(pdrvdata.class, port->dev->devt);
free_cdev:
@@ -1434,7 +1522,11 @@ static void remove_port_data(struct port *port)
 
 	/* Remove buffers we queued up for the Host to send us data in. */
 	while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
-		free_buf(buf);
+		free_buf(buf, true);
+
+	/* Free pending buffers from the out-queue. */
+	while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
+		free_buf(buf, true);
 }
 
 /*
@@ -1636,7 +1728,7 @@ static void control_work_handler(struct work_struct *work)
 		if (add_inbuf(portdev->c_ivq, buf) < 0) {
 			dev_warn(&portdev->vdev->dev,
 				 "Error adding buffer to queue\n");
-			free_buf(buf);
+			free_buf(buf, false);
 		}
 	}
 	spin_unlock(&portdev->cvq_lock);
@@ -1832,10 +1924,10 @@ static void remove_controlq_data(struct ports_device *portdev)
 		return;
 
 	while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
-		free_buf(buf);
+		free_buf(buf, true);
 
 	while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
-		free_buf(buf);
+		free_buf(buf, true);
 }
 
 /*
@@ -1882,11 +1974,15 @@ static int virtcons_probe(struct virtio_device *vdev)
 
 	multiport = false;
 	portdev->config.max_nr_ports = 1;
-	if (virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
+
+	/* Don't test MULTIPORT at all if we're rproc: not a valid feature! */
+	if (!is_rproc_serial(vdev) &&
+	    virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
 			      offsetof(struct virtio_console_config,
 				       max_nr_ports),
-			      &portdev->config.max_nr_ports) == 0)
+			      &portdev->config.max_nr_ports) == 0) {
 		multiport = true;
+	}
 
 	err = init_vqs(portdev);
 	if (err < 0) {
@@ -1996,6 +2092,16 @@ static unsigned int features[] = {
 	VIRTIO_CONSOLE_F_MULTIPORT,
 };
 
+static struct virtio_device_id rproc_serial_id_table[] = {
+#if IS_ENABLED(CONFIG_REMOTEPROC)
+	{ VIRTIO_ID_RPROC_SERIAL, VIRTIO_DEV_ANY_ID },
+#endif
+	{ 0 },
+};
+
+static unsigned int rproc_serial_features[] = {
+};
+
 #ifdef CONFIG_PM
 static int virtcons_freeze(struct virtio_device *vdev)
 {
@@ -2080,6 +2186,20 @@ static struct virtio_driver virtio_console = {
 #endif
 };
 
+/*
+ * virtio_rproc_serial refers to __devinit function which causes
+ * section mismatch warnings. So use __refdata to silence warnings.
+ */
+static struct virtio_driver __refdata virtio_rproc_serial = {
+	.feature_table = rproc_serial_features,
+	.feature_table_size = ARRAY_SIZE(rproc_serial_features),
+	.driver.name =	"virtio_rproc_serial",
+	.driver.owner =	THIS_MODULE,
+	.id_table =	rproc_serial_id_table,
+	.probe =	virtcons_probe,
+	.remove =	virtcons_remove,
+};
+
 static int __init init(void)
 {
 	int err;
@@ -2104,7 +2224,15 @@ static int __init init(void)
 		pr_err("Error %d registering virtio driver\n", err);
 		goto free;
 	}
+	err = register_virtio_driver(&virtio_rproc_serial);
+	if (err < 0) {
+		pr_err("Error %d registering virtio rproc serial driver\n",
+		       err);
+		goto unregister;
+	}
 	return 0;
+unregister:
+	unregister_virtio_driver(&virtio_console);
free:
 	if (pdrvdata.debugfs_dir)
 		debugfs_remove_recursive(pdrvdata.debugfs_dir);
@@ -2114,7 +2242,10 @@ free:
 
 static void __exit fini(void)
 {
+	reclaim_dma_bufs();
+
 	unregister_virtio_driver(&virtio_console);
+	unregister_virtio_driver(&virtio_rproc_serial);
 
 	class_destroy(pdrvdata.class);
 	if (pdrvdata.debugfs_dir)

diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
@@ -225,7 +225,7 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
 			 * eventfd (ie. the appropriate virtqueue thread)?
 			 */
 			if (!send_notify_to_eventfd(cpu)) {
-				/* OK, we tell the main Laucher. */
+				/* OK, we tell the main Launcher. */
 				if (put_user(cpu->pending_notify, user))
 					return -EFAULT;
 				return sizeof(cpu->pending_notify);

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
@@ -130,7 +130,6 @@ struct skb_vnet_hdr {
 		struct virtio_net_hdr hdr;
 		struct virtio_net_hdr_mrg_rxbuf mhdr;
 	};
-	unsigned int num_sg;
 };
 
 struct padded_vnet_hdr {
@@ -530,10 +529,10 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
 			err = add_recvbuf_small(rq, gfp);
 
 		oom = err == -ENOMEM;
-		if (err < 0)
+		if (err)
 			break;
 		++rq->num;
-	} while (err > 0);
+	} while (rq->vq->num_free);
 	if (unlikely(rq->num > rq->max))
 		rq->max = rq->num;
 	virtqueue_kick(rq->vq);
@@ -640,10 +639,10 @@ static int virtnet_open(struct net_device *dev)
 	return 0;
 }
 
-static unsigned int free_old_xmit_skbs(struct send_queue *sq)
+static void free_old_xmit_skbs(struct send_queue *sq)
 {
 	struct sk_buff *skb;
-	unsigned int len, tot_sgs = 0;
+	unsigned int len;
 	struct virtnet_info *vi = sq->vq->vdev->priv;
 	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
@@ -655,10 +654,8 @@ static unsigned int free_old_xmit_skbs(struct send_queue *sq)
 		stats->tx_packets++;
 		u64_stats_update_end(&stats->tx_syncp);
 
-		tot_sgs += skb_vnet_hdr(skb)->num_sg;
 		dev_kfree_skb_any(skb);
 	}
-	return tot_sgs;
 }
 
 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
@@ -666,6 +663,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
 	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
 	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
 	struct virtnet_info *vi = sq->vq->vdev->priv;
+	unsigned num_sg;
 
 	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
 
@@ -704,8 +702,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
 	else
 		sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);
 
-	hdr->num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
-	return virtqueue_add_buf(sq->vq, sq->sg, hdr->num_sg,
+	num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
+	return virtqueue_add_buf(sq->vq, sq->sg, num_sg,
 				 0, skb, GFP_ATOMIC);
 }
 
@@ -714,28 +712,20 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct virtnet_info *vi = netdev_priv(dev);
 	int qnum = skb_get_queue_mapping(skb);
 	struct send_queue *sq = &vi->sq[qnum];
-	int capacity;
+	int err;
 
 	/* Free up any pending old buffers before queueing new ones. */
 	free_old_xmit_skbs(sq);
 
 	/* Try to transmit */
-	capacity = xmit_skb(sq, skb);
+	err = xmit_skb(sq, skb);
 
-	/* This can happen with OOM and indirect buffers. */
-	if (unlikely(capacity < 0)) {
-		if (likely(capacity == -ENOMEM)) {
-			if (net_ratelimit())
-				dev_warn(&dev->dev,
-					 "TXQ (%d) failure: out of memory\n",
-					 qnum);
-		} else {
-			dev->stats.tx_fifo_errors++;
-			if (net_ratelimit())
-				dev_warn(&dev->dev,
-					 "Unexpected TXQ (%d) failure: %d\n",
-					 qnum, capacity);
-		}
+	/* This should not happen! */
+	if (unlikely(err)) {
+		dev->stats.tx_fifo_errors++;
+		if (net_ratelimit())
+			dev_warn(&dev->dev,
+				 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
 		dev->stats.tx_dropped++;
 		kfree_skb(skb);
 		return NETDEV_TX_OK;
@@ -748,12 +738,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* Apparently nice girls don't return TX_BUSY; stop the queue
 	 * before it gets out of hand.  Naturally, this wastes entries. */
-	if (capacity < 2+MAX_SKB_FRAGS) {
+	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
 		netif_stop_subqueue(dev, qnum);
 		if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
 			/* More just got used, free them then recheck. */
-			capacity += free_old_xmit_skbs(sq);
-			if (capacity >= 2+MAX_SKB_FRAGS) {
+			free_old_xmit_skbs(sq);
+			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
 				netif_start_subqueue(dev, qnum);
 				virtqueue_disable_cb(sq->vq);
 			}

diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -764,7 +764,7 @@ int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
 
 	/* add message to the remote processor's virtqueue */
 	err = virtqueue_add_buf(vrp->svq, &sg, 1, 0, msg, GFP_KERNEL);
-	if (err < 0) {
+	if (err) {
 		/*
 		 * need to reclaim the buffer here, otherwise it's lost
 		 * (memory won't leak, but rpmsg won't use it again for TX).
@@ -776,8 +776,6 @@ int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
 
 	/* tell the remote processor it has a pending message to read */
 	virtqueue_kick(vrp->svq);
-
-	err = 0;
out:
 	mutex_unlock(&vrp->tx_lock);
 	return err;
@@ -980,7 +978,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
 
 		err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, cpu_addr,
 								GFP_KERNEL);
-		WARN_ON(err < 0); /* sanity check; this can't really happen */
+		WARN_ON(err); /* sanity check; this can't really happen */
 	}
 
 	/* suppress "tx-complete" interrupts */

diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
@@ -215,7 +215,7 @@ static void virtscsi_ctrl_done(struct virtqueue *vq)
 static int virtscsi_kick_event(struct virtio_scsi *vscsi,
 			       struct virtio_scsi_event_node *event_node)
 {
-	int ret;
+	int err;
 	struct scatterlist sg;
 	unsigned long flags;
 
@@ -223,13 +223,14 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
 
 	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
 
-	ret = virtqueue_add_buf(vscsi->event_vq.vq, &sg, 0, 1, event_node, GFP_ATOMIC);
-	if (ret >= 0)
+	err = virtqueue_add_buf(vscsi->event_vq.vq, &sg, 0, 1, event_node,
+				GFP_ATOMIC);
+	if (!err)
 		virtqueue_kick(vscsi->event_vq.vq);
 
 	spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
 
-	return ret;
+	return err;
 }
 
 static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
@@ -410,22 +411,23 @@ static int virtscsi_kick_cmd(struct virtio_scsi_target_state *tgt,
 {
 	unsigned int out_num, in_num;
 	unsigned long flags;
-	int ret;
+	int err;
+	bool needs_kick = false;
 
 	spin_lock_irqsave(&tgt->tgt_lock, flags);
 	virtscsi_map_cmd(tgt, cmd, &out_num, &in_num, req_size, resp_size);
 
 	spin_lock(&vq->vq_lock);
-	ret = virtqueue_add_buf(vq->vq, tgt->sg, out_num, in_num, cmd, gfp);
+	err = virtqueue_add_buf(vq->vq, tgt->sg, out_num, in_num, cmd, gfp);
 	spin_unlock(&tgt->tgt_lock);
-	if (ret >= 0)
-		ret = virtqueue_kick_prepare(vq->vq);
+	if (!err)
+		needs_kick = virtqueue_kick_prepare(vq->vq);
 
 	spin_unlock_irqrestore(&vq->vq_lock, flags);
 
-	if (ret > 0)
+	if (needs_kick)
 		virtqueue_notify(vq->vq);
-	return ret;
+	return err;
 }
 
 static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
@@ -467,7 +469,7 @@ static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
 
 	if (virtscsi_kick_cmd(tgt, &vscsi->req_vq, cmd,
 			      sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
-			      GFP_ATOMIC) >= 0)
+			      GFP_ATOMIC) == 0)
 		ret = 0;
 	else
 		mempool_free(cmd, virtscsi_cmd_pool);

diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
@@ -10,33 +10,32 @@ static DEFINE_IDA(virtio_index_ida);
 static ssize_t device_show(struct device *_d,
 			   struct device_attribute *attr, char *buf)
 {
-	struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
+	struct virtio_device *dev = dev_to_virtio(_d);
 	return sprintf(buf, "0x%04x\n", dev->id.device);
 }
 static ssize_t vendor_show(struct device *_d,
 			   struct device_attribute *attr, char *buf)
 {
-	struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
+	struct virtio_device *dev = dev_to_virtio(_d);
 	return sprintf(buf, "0x%04x\n", dev->id.vendor);
 }
 static ssize_t status_show(struct device *_d,
 			   struct device_attribute *attr, char *buf)
 {
-	struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
+	struct virtio_device *dev = dev_to_virtio(_d);
 	return sprintf(buf, "0x%08x\n", dev->config->get_status(dev));
 }
 static ssize_t modalias_show(struct device *_d,
 			     struct device_attribute *attr, char *buf)
 {
-	struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
-
+	struct virtio_device *dev = dev_to_virtio(_d);
 	return sprintf(buf, "virtio:d%08Xv%08X\n",
 		       dev->id.device, dev->id.vendor);
 }
 static ssize_t features_show(struct device *_d,
 			     struct device_attribute *attr, char *buf)
 {
-	struct virtio_device *dev = container_of(_d, struct virtio_device, dev);
+	struct virtio_device *dev = dev_to_virtio(_d);
 	unsigned int i;
 	ssize_t len = 0;
 
@@ -71,10 +70,10 @@ static inline int virtio_id_match(const struct virtio_device *dev,
 static int virtio_dev_match(struct device *_dv, struct device_driver *_dr)
 {
 	unsigned int i;
-	struct virtio_device *dev = container_of(_dv,struct virtio_device,dev);
+	struct virtio_device *dev = dev_to_virtio(_dv);
 	const struct virtio_device_id *ids;
 
-	ids = container_of(_dr, struct virtio_driver, driver)->id_table;
+	ids = drv_to_virtio(_dr)->id_table;
 	for (i = 0; ids[i].device; i++)
 		if (virtio_id_match(dev, &ids[i]))
 			return 1;
@@ -83,7 +82,7 @@ static int virtio_dev_match(struct device *_dv, struct device_driver *_dr)
 
 static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env)
 {
-	struct virtio_device *dev = container_of(_dv,struct virtio_device,dev);
+	struct virtio_device *dev = dev_to_virtio(_dv);
 
 	return add_uevent_var(env, "MODALIAS=virtio:d%08Xv%08X",
 			      dev->id.device, dev->id.vendor);
@@ -98,8 +97,7 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
 					 unsigned int fbit)
 {
 	unsigned int i;
-	struct virtio_driver *drv = container_of(vdev->dev.driver,
-						 struct virtio_driver, driver);
+	struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);
 
 	for (i = 0; i < drv->feature_table_size; i++)
 		if (drv->feature_table[i] == fbit)
@@ -111,9 +109,8 @@ EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature);
 static int virtio_dev_probe(struct device *_d)
 {
 	int err, i;
-	struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
-	struct virtio_driver *drv = container_of(dev->dev.driver,
-						 struct virtio_driver, driver);
+	struct virtio_device *dev = dev_to_virtio(_d);
+	struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
 	u32 device_features;
 
 	/* We have a driver! */
@@ -152,9 +149,8 @@ static int virtio_dev_probe(struct device *_d)
 
 static int virtio_dev_remove(struct device *_d)
 {
-	struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
-	struct virtio_driver *drv = container_of(dev->dev.driver,
-						 struct virtio_driver, driver);
+	struct virtio_device *dev = dev_to_virtio(_d);
+	struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
 
 	drv->remove(dev);
 

diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
@@ -139,8 +139,7 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
 		struct page *page = balloon_page_enqueue(vb_dev_info);
 
 		if (!page) {
-			if (printk_ratelimit())
-				dev_printk(KERN_INFO, &vb->vdev->dev,
+			dev_info_ratelimited(&vb->vdev->dev,
 					     "Out of puff! Can't get %u pages\n",
 					     VIRTIO_BALLOON_PAGES_PER_PAGE);
 			/* Sleep for at least 1/5 of a second before retry. */

diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
@@ -225,7 +225,7 @@ static void vm_notify(struct virtqueue *vq)
 
 	/* We write the queue's selector into the notification register to
 	 * signal the other end */
-	writel(virtqueue_get_queue_index(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+	writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
 }
 
 /* Notify all virtqueues on an interrupt. */
@@ -266,7 +266,7 @@ static void vm_del_vq(struct virtqueue *vq)
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
 	struct virtio_mmio_vq_info *info = vq->priv;
 	unsigned long flags, size;
-	unsigned int index = virtqueue_get_queue_index(vq);
+	unsigned int index = vq->index;
 
 	spin_lock_irqsave(&vm_dev->lock, flags);
 	list_del(&info->node);
@@ -521,25 +521,33 @@ static int vm_cmdline_set(const char *device,
 	int err;
 	struct resource resources[2] = {};
 	char *str;
-	long long int base;
+	long long int base, size;
+	unsigned int irq;
 	int processed, consumed = 0;
 	struct platform_device *pdev;
 
-	resources[0].flags = IORESOURCE_MEM;
-	resources[1].flags = IORESOURCE_IRQ;
-
-	resources[0].end = memparse(device, &str) - 1;
+	/* Consume "size" part of the command line parameter */
+	size = memparse(device, &str);
 
+	/* Get "@<base>:<irq>[:<id>]" chunks */
 	processed = sscanf(str, "@%lli:%u%n:%d%n",
-			&base, &resources[1].start, &consumed,
+			&base, &irq, &consumed,
 			&vm_cmdline_id, &consumed);
 
-	if (processed < 2 || processed > 3 || str[consumed])
+	/*
+	 * sscanf() must process at least 2 chunks; also there
+	 * must be no extra characters after the last chunk, so
+	 * str[consumed] must be '\0'
+	 */
+	if (processed < 2 || str[consumed])
 		return -EINVAL;
 
+	resources[0].flags = IORESOURCE_MEM;
 	resources[0].start = base;
-	resources[0].end += base;
-	resources[1].end = resources[1].start;
+	resources[0].end = base + size - 1;
+
+	resources[1].flags = IORESOURCE_IRQ;
+	resources[1].start = resources[1].end = irq;
 
 	if (!vm_cmdline_parent_registered) {
 		err = device_register(&vm_cmdline_parent);

diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
@@ -203,8 +203,7 @@ static void vp_notify(struct virtqueue *vq)
 
 	/* we write the queue's selector into the notification register to
 	 * signal the other end */
-	iowrite16(virtqueue_get_queue_index(vq),
-		  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
+	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
 }
 
 /* Handle a configuration change: Tell driver if it wants to know. */
@@ -479,8 +478,7 @@ static void vp_del_vq(struct virtqueue *vq)
 	list_del(&info->node);
 	spin_unlock_irqrestore(&vp_dev->lock, flags);
 
-	iowrite16(virtqueue_get_queue_index(vq),
-		vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
 
 	if (vp_dev->msix_enabled) {
 		iowrite16(VIRTIO_MSI_NO_VECTOR,
@@ -830,16 +828,4 @@ static struct pci_driver virtio_pci_driver = {
 #endif
 };
 
-static int __init virtio_pci_init(void)
-{
-	return pci_register_driver(&virtio_pci_driver);
-}
-
-module_init(virtio_pci_init);
-
-static void __exit virtio_pci_exit(void)
-{
-	pci_unregister_driver(&virtio_pci_driver);
-}
-
-module_exit(virtio_pci_exit);
+module_pci_driver(virtio_pci_driver);

@@ -93,8 +93,6 @@ struct vring_virtqueue
 	/* Host publishes avail event idx */
 	bool event;
 
-	/* Number of free buffers */
-	unsigned int num_free;
 	/* Head of free buffer list. */
 	unsigned int free_head;
 	/* Number we've added since last sync. */
@@ -106,9 +104,6 @@ struct vring_virtqueue
 	/* How to notify other side. FIXME: commonalize hcalls! */
 	void (*notify)(struct virtqueue *vq);
 
-	/* Index of the queue */
-	int queue_index;
-
 #ifdef DEBUG
 	/* They're supposed to lock for us. */
 	unsigned int in_use;
@@ -135,6 +130,13 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
 	unsigned head;
 	int i;
 
+	/*
+	 * We require lowmem mappings for the descriptors because
+	 * otherwise virt_to_phys will give us bogus addresses in the
+	 * virtqueue.
+	 */
+	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);
+
 	desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
 	if (!desc)
 		return -ENOMEM;
@@ -160,7 +162,7 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
 	desc[i-1].next = 0;
 
 	/* We're about to use a buffer */
-	vq->num_free--;
+	vq->vq.num_free--;
 
 	/* Use a single buffer which doesn't continue */
 	head = vq->free_head;
@@ -174,13 +176,6 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
 	return head;
 }
 
-int virtqueue_get_queue_index(struct virtqueue *_vq)
-{
-	struct vring_virtqueue *vq = to_vvq(_vq);
-	return vq->queue_index;
-}
-EXPORT_SYMBOL_GPL(virtqueue_get_queue_index);
-
 /**
  * virtqueue_add_buf - expose buffer to other end
  * @vq: the struct virtqueue we're talking about.
@@ -193,10 +188,7 @@ EXPORT_SYMBOL_GPL(virtqueue_get_queue_index);
  * Caller must ensure we don't call this with other virtqueue operations
  * at the same time (except where noted).
  *
- * Returns remaining capacity of queue or a negative error
- * (ie. ENOSPC).  Note that it only really makes sense to treat all
- * positive return values as "available": indirect buffers mean that
- * we can put an entire sg[] array inside a single queue entry.
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
  */
 int virtqueue_add_buf(struct virtqueue *_vq,
 		      struct scatterlist sg[],
@@ -228,7 +220,7 @@ int virtqueue_add_buf(struct virtqueue *_vq,
 
 	/* If the host supports indirect descriptor tables, and we have multiple
 	 * buffers, then go indirect. FIXME: tune this threshold */
-	if (vq->indirect && (out + in) > 1 && vq->num_free) {
+	if (vq->indirect && (out + in) > 1 && vq->vq.num_free) {
 		head = vring_add_indirect(vq, sg, out, in, gfp);
 		if (likely(head >= 0))
 			goto add_head;
@@ -237,9 +229,9 @@ int virtqueue_add_buf(struct virtqueue *_vq,
 	BUG_ON(out + in > vq->vring.num);
 	BUG_ON(out + in == 0);
 
-	if (vq->num_free < out + in) {
+	if (vq->vq.num_free < out + in) {
 		pr_debug("Can't add buf len %i - avail = %i\n",
-			 out + in, vq->num_free);
+			 out + in, vq->vq.num_free);
 		/* FIXME: for historical reasons, we force a notify here if
 		 * there are outgoing parts to the buffer.  Presumably the
 		 * host should service the ring ASAP. */
@@ -250,7 +242,7 @@ int virtqueue_add_buf(struct virtqueue *_vq,
 	}
 
 	/* We're about to use some buffers from the free list. */
-	vq->num_free -= out + in;
+	vq->vq.num_free -= out + in;
 
 	head = vq->free_head;
 	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
@@ -296,7 +288,7 @@ add_head:
 	pr_debug("Added buffer head %i to %p\n", head, vq);
 	END_USE(vq);
 
-	return vq->num_free;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(virtqueue_add_buf);
 
@@ -393,13 +385,13 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
 
 	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
 		i = vq->vring.desc[i].next;
-		vq->num_free++;
+		vq->vq.num_free++;
 	}
 
 	vq->vring.desc[i].next = vq->free_head;
 	vq->free_head = head;
 	/* Plus final descriptor */
-	vq->num_free++;
+	vq->vq.num_free++;
 }
 
 static inline bool more_used(const struct vring_virtqueue *vq)
@@ -599,7 +591,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
 		return buf;
 	}
 	/* That should have freed everything. */
-	BUG_ON(vq->num_free != vq->vring.num);
+	BUG_ON(vq->vq.num_free != vq->vring.num);
 
 	END_USE(vq);
 	return NULL;
@@ -653,12 +645,13 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
 	vq->vq.callback = callback;
 	vq->vq.vdev = vdev;
 	vq->vq.name = name;
+	vq->vq.num_free = num;
+	vq->vq.index = index;
 	vq->notify = notify;
 	vq->weak_barriers = weak_barriers;
 	vq->broken = false;
 	vq->last_used_idx = 0;
 	vq->num_added = 0;
-	vq->queue_index = index;
 	list_add_tail(&vq->vq.list, &vdev->vqs);
 #ifdef DEBUG
 	vq->in_use = false;
@@ -673,7 +666,6 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
 		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
 
 	/* Put everything in free lists. */
-	vq->num_free = num;
 	vq->free_head = 0;
 	for (i = 0; i < num-1; i++) {
 		vq->vring.desc[i].next = i+1;
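
With virtqueue_add_buf() now returning 0 on success, callers can no longer infer remaining capacity from a positive return value; that question is answered by the public num_free field instead. A caller-side sketch (hypothetical driver code, not taken from this series):

	static void fill_queue(struct virtqueue *vq, struct scatterlist *sg,
			       void *data)
	{
		int err;

		/* 0 on success; -ENOSPC when the ring is full, -ENOMEM on
		 * allocation failure for indirect descriptors. */
		err = virtqueue_add_buf(vq, sg, 1, 0, data, GFP_ATOMIC);
		if (err < 0) {
			/* Old code compared the return value against capacity;
			 * now the exported num_free field answers that. */
			pr_debug("add_buf failed %d, num_free=%u\n",
				 err, vq->num_free);
			return;
		}
		virtqueue_kick(vq);
	}
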
@@ -16,12 +16,20 @@
  * @name: the name of this virtqueue (mainly for debugging)
  * @vdev: the virtio device this queue was created for.
  * @priv: a pointer for the virtqueue implementation to use.
+ * @index: the zero-based ordinal number for this queue.
+ * @num_free: number of elements we expect to be able to fit.
+ *
+ * A note on @num_free: with indirect buffers, each buffer needs one
+ * element in the queue, otherwise a buffer will need one element per
+ * sg element.
  */
 struct virtqueue {
 	struct list_head list;
 	void (*callback)(struct virtqueue *vq);
 	const char *name;
 	struct virtio_device *vdev;
+	unsigned int index;
+	unsigned int num_free;
 	void *priv;
 };
 
@@ -50,7 +58,11 @@ void *virtqueue_detach_unused_buf(struct virtqueue *vq);
 
 unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
 
-int virtqueue_get_queue_index(struct virtqueue *vq);
+/* FIXME: Obsolete accessor, but required for virtio_net merge. */
+static inline unsigned int virtqueue_get_queue_index(struct virtqueue *vq)
+{
+	return vq->index;
+}
 
 /**
  * virtio_device - representation of a device using virtio
@@ -73,7 +85,11 @@ struct virtio_device {
 	void *priv;
 };
 
-#define dev_to_virtio(dev) container_of(dev, struct virtio_device, dev)
+static inline struct virtio_device *dev_to_virtio(struct device *_dev)
+{
+	return container_of(_dev, struct virtio_device, dev);
+}
+
 int register_virtio_device(struct virtio_device *dev);
 void unregister_virtio_device(struct virtio_device *dev);
 
@@ -103,6 +119,11 @@ struct virtio_driver {
 #endif
 };
 
+static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv)
+{
+	return container_of(drv, struct virtio_driver, driver);
+}
+
 int register_virtio_driver(struct virtio_driver *drv);
 void unregister_virtio_driver(struct virtio_driver *drv);
 #endif /* _LINUX_VIRTIO_H */
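
Turning dev_to_virtio() into a typed inline means the compiler now checks that the argument really is a struct device pointer, where the old macro accepted anything; drv_to_virtio() gives bus code the matching helper. A hypothetical use, loosely modelled on the virtio bus match logic (simplified here to ignore vendor matching, and not copied from the driver core):

	static int virtio_dev_match(struct device *_dv, struct device_driver *_dr)
	{
		struct virtio_device *dev = dev_to_virtio(_dv);
		const struct virtio_device_id *ids = drv_to_virtio(_dr)->id_table;
		unsigned int i;

		/* The id table is terminated by an all-zero sentinel entry. */
		for (i = 0; ids[i].device; i++)
			if (ids[i].device == dev->id.device)
				return 1;
		return 0;
	}
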
@@ -1,7 +1,31 @@
+/*
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
 #ifndef _LINUX_VIRTIO_SCSI_H
 #define _LINUX_VIRTIO_SCSI_H
-/* This header is BSD licensed so anyone can use the definitions to implement
- * compatible drivers/servers. */
 
 #define VIRTIO_SCSI_CDB_SIZE   32
 #define VIRTIO_SCSI_SENSE_SIZE 96
@@ -37,5 +37,6 @@
 #define VIRTIO_ID_RPMSG		7 /* virtio remote processor messaging */
 #define VIRTIO_ID_SCSI		8 /* virtio scsi */
 #define VIRTIO_ID_9P		9 /* 9p virtio console */
+#define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */
 
 #endif /* _LINUX_VIRTIO_IDS_H */
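
A driver claims the new ID through its virtio_device_id table. Sketch (the table name mirrors the remoteproc-serial console support but is an assumption here, not quoted from the patch):

	static struct virtio_device_id rproc_serial_id_table[] = {
		{ VIRTIO_ID_RPROC_SERIAL, VIRTIO_DEV_ANY_ID },
		{ 0 },
	};
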
@@ -105,6 +105,7 @@ struct page *kmap_to_page(void *vaddr)
 
 	return virt_to_page(addr);
 }
+EXPORT_SYMBOL(kmap_to_page);
 
 static void flush_all_zero_pkmaps(void)
 {
@@ -39,6 +39,7 @@
 #include <linux/inet.h>
 #include <linux/idr.h>
 #include <linux/file.h>
+#include <linux/highmem.h>
 #include <linux/slab.h>
 #include <net/9p/9p.h>
 #include <linux/parser.h>
@@ -325,7 +326,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
 		int count = nr_pages;
 		while (nr_pages) {
 			s = rest_of_page(data);
-			pages[index++] = virt_to_page(data);
+			pages[index++] = kmap_to_page(data);
 			data += s;
 			nr_pages--;
 		}
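
The 9p change matters because virt_to_page() is only valid for addresses in the kernel's linear mapping; buffers reached through kmap() live in the pkmap area, which is why kmap_to_page(), which handles both cases, is exported above. A sketch of the distinction (illustrative helper, not kernel code):

	static struct page *page_of(void *addr)
	{
		/*
		 * virt_to_page() assumes 'addr' lies in the linear mapping
		 * and returns garbage for kmap()ed highmem pages.
		 * kmap_to_page() checks which range the address falls in:
		 * it looks up the pkmap entry for highmem mappings and
		 * falls back to virt_to_page() for lowmem.
		 */
		return kmap_to_page(addr);
	}
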
@@ -179,29 +179,6 @@ static struct termios orig_term;
 #define wmb() __asm__ __volatile__("" : : : "memory")
 #define mb() __asm__ __volatile__("" : : : "memory")
 
-/*
- * Convert an iovec element to the given type.
- *
- * This is a fairly ugly trick: we need to know the size of the type and
- * alignment requirement to check the pointer is kosher.  It's also nice to
- * have the name of the type in case we report failure.
- *
- * Typing those three things all the time is cumbersome and error prone, so we
- * have a macro which sets them all up and passes to the real function.
- */
-#define convert(iov, type) \
-	((type *)_convert((iov), sizeof(type), __alignof__(type), #type))
-
-static void *_convert(struct iovec *iov, size_t size, size_t align,
-		      const char *name)
-{
-	if (iov->iov_len != size)
-		errx(1, "Bad iovec size %zu for %s", iov->iov_len, name);
-	if ((unsigned long)iov->iov_base % align != 0)
-		errx(1, "Bad alignment %p for %s", iov->iov_base, name);
-	return iov->iov_base;
-}
-
 /* Wrapper for the last available index.  Makes it easier to change. */
 #define lg_last_avail(vq)	((vq)->last_avail_idx)
 
@@ -228,7 +205,8 @@ static bool iov_empty(const struct iovec iov[], unsigned int num_iov)
 }
 
 /* Take len bytes from the front of this iovec. */
-static void iov_consume(struct iovec iov[], unsigned num_iov, unsigned len)
+static void iov_consume(struct iovec iov[], unsigned num_iov,
+			void *dest, unsigned len)
 {
 	unsigned int i;
 
@@ -236,11 +214,16 @@ static void iov_consume(struct iovec iov[], unsigned num_iov, unsigned len)
 		unsigned int used;
 
 		used = iov[i].iov_len < len ? iov[i].iov_len : len;
+		if (dest) {
+			memcpy(dest, iov[i].iov_base, used);
+			dest += used;
+		}
 		iov[i].iov_base += used;
 		iov[i].iov_len -= used;
 		len -= used;
 	}
-	assert(len == 0);
+	if (len != 0)
+		errx(1, "iovec too short!");
 }
 
 /* The device virtqueue descriptors are followed by feature bitmasks. */
@@ -864,7 +847,7 @@ static void console_output(struct virtqueue *vq)
 			warn("Write to stdout gave %i (%d)", len, errno);
 			break;
 		}
-		iov_consume(iov, out, len);
+		iov_consume(iov, out, NULL, len);
 	}
 
 	/*
@@ -1591,9 +1574,9 @@ static void blk_request(struct virtqueue *vq)
 {
 	struct vblk_info *vblk = vq->dev->priv;
 	unsigned int head, out_num, in_num, wlen;
-	int ret;
+	int ret, i;
 	u8 *in;
-	struct virtio_blk_outhdr *out;
+	struct virtio_blk_outhdr out;
 	struct iovec iov[vq->vring.num];
 	off64_t off;
 
@@ -1603,32 +1586,36 @@ static void blk_request(struct virtqueue *vq)
 	 */
 	head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
 
-	/*
-	 * Every block request should contain at least one output buffer
-	 * (detailing the location on disk and the type of request) and one
-	 * input buffer (to hold the result).
-	 */
-	if (out_num == 0 || in_num == 0)
-		errx(1, "Bad virtblk cmd %u out=%u in=%u",
-		     head, out_num, in_num);
+	/* Copy the output header from the front of the iov (adjusts iov) */
+	iov_consume(iov, out_num, &out, sizeof(out));
+
+	/* Find and trim end of iov input array, for our status byte. */
+	in = NULL;
+	for (i = out_num + in_num - 1; i >= out_num; i--) {
+		if (iov[i].iov_len > 0) {
+			in = iov[i].iov_base + iov[i].iov_len - 1;
+			iov[i].iov_len--;
+			break;
+		}
+	}
+	if (!in)
+		errx(1, "Bad virtblk cmd with no room for status");
 
-	out = convert(&iov[0], struct virtio_blk_outhdr);
-	in = convert(&iov[out_num+in_num-1], u8);
 	/*
 	 * For historical reasons, block operations are expressed in 512 byte
 	 * "sectors".
 	 */
-	off = out->sector * 512;
+	off = out.sector * 512;
 
 	/*
 	 * In general the virtio block driver is allowed to try SCSI commands.
 	 * It'd be nice if we supported eject, for example, but we don't.
 	 */
-	if (out->type & VIRTIO_BLK_T_SCSI_CMD) {
+	if (out.type & VIRTIO_BLK_T_SCSI_CMD) {
 		fprintf(stderr, "Scsi commands unsupported\n");
 		*in = VIRTIO_BLK_S_UNSUPP;
 		wlen = sizeof(*in);
-	} else if (out->type & VIRTIO_BLK_T_OUT) {
+	} else if (out.type & VIRTIO_BLK_T_OUT) {
 		/*
 		 * Write
 		 *
@@ -1636,10 +1623,10 @@ static void blk_request(struct virtqueue *vq)
 		 * if they try to write past end.
 		 */
 		if (lseek64(vblk->fd, off, SEEK_SET) != off)
-			err(1, "Bad seek to sector %llu", out->sector);
+			err(1, "Bad seek to sector %llu", out.sector);
 
-		ret = writev(vblk->fd, iov+1, out_num-1);
-		verbose("WRITE to sector %llu: %i\n", out->sector, ret);
+		ret = writev(vblk->fd, iov, out_num);
+		verbose("WRITE to sector %llu: %i\n", out.sector, ret);
 
 		/*
 		 * Grr... Now we know how long the descriptor they sent was, we
@@ -1655,7 +1642,7 @@ static void blk_request(struct virtqueue *vq)
 
 		wlen = sizeof(*in);
 		*in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR);
-	} else if (out->type & VIRTIO_BLK_T_FLUSH) {
+	} else if (out.type & VIRTIO_BLK_T_FLUSH) {
 		/* Flush */
 		ret = fdatasync(vblk->fd);
 		verbose("FLUSH fdatasync: %i\n", ret);
@@ -1669,10 +1656,9 @@ static void blk_request(struct virtqueue *vq)
 		 * if they try to read past end.
 		 */
 		if (lseek64(vblk->fd, off, SEEK_SET) != off)
-			err(1, "Bad seek to sector %llu", out->sector);
+			err(1, "Bad seek to sector %llu", out.sector);
 
-		ret = readv(vblk->fd, iov+1, in_num-1);
-		verbose("READ from sector %llu: %i\n", out->sector, ret);
+		ret = readv(vblk->fd, iov + out_num, in_num);
 		if (ret >= 0) {
 			wlen = sizeof(*in) + ret;
 			*in = VIRTIO_BLK_S_OK;
@@ -1758,7 +1744,7 @@ static void rng_input(struct virtqueue *vq)
 		len = readv(rng_info->rfd, iov, in_num);
 		if (len <= 0)
 			err(1, "Read from /dev/random gave %i", len);
-		iov_consume(iov, in_num, len);
+		iov_consume(iov, in_num, NULL, len);
 		totlen += len;
 	}
 
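
iov_consume() gaining a dest argument lets callers peel a fixed-size header off the front of an iovec in a single call, as blk_request now does with its virtio_blk_outhdr; a NULL dest keeps the old skip-only behaviour. A usage sketch (hypothetical device code in the same style; the header layout is an assumption):

	struct some_hdr { unsigned int type, len; };	/* assumed layout */

	static void handle_request(struct iovec iov[], unsigned num_iov)
	{
		struct some_hdr hdr;

		/* Copy sizeof(hdr) bytes out and advance the iovec past them. */
		iov_consume(iov, num_iov, &hdr, sizeof(hdr));

		/* A NULL dest simply discards the next hdr.len bytes. */
		iov_consume(iov, num_iov, NULL, hdr.len);
	}
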
@@ -164,7 +164,7 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
 				r = virtqueue_add_buf(vq->vq, &sl, 1, 0,
 						      dev->buf + started,
 						      GFP_ATOMIC);
-				if (likely(r >= 0)) {
+				if (likely(r == 0)) {
 					++started;
 					virtqueue_kick(vq->vq);
 				}
@@ -177,7 +177,7 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
 				r = 0;
 			}
 
-		} while (r >= 0);
+		} while (r == 0);
 		if (completed == completed_before)
 			++spurious;
 		assert(completed <= bufs);