Commit 48cf6061 authored by Laurent Vivier, committed by Linus Torvalds

NBD: allow nbd to be used locally

This patch allows Network Block Device to be mounted locally (nbd-client to
nbd-server over 127.0.0.1).

It creates a kthread to avoid the deadlock described in the NBD tools documentation: if nbd-client blocks waiting for pages, the kblockd thread can still continue its work and free pages.

I have tested the patch to verify that it avoids the hang that always occurs
when writing to a localhost nbd connection.  I have also tested to verify that
no performance degradation results from the additional thread and queue.
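For readers who want the shape of the fix without reading the whole diff: the patch turns request submission into a producer/consumer handoff. do_nbd_request() now only queues the request on lo->waiting_queue and wakes the new nbd_thread() kthread, and that kthread alone performs the blocking socket send. The sketch below is a minimal userspace analogy of that handoff using POSIX threads; it is illustrative only and not kernel code (the names queue_request/worker and the fixed request count are invented here, not part of the patch).

/*
 * Userspace analogy (NOT kernel code) of the handoff introduced by the patch:
 * the submitter only queues work and signals, while a dedicated worker thread
 * performs the potentially blocking "send".  In the patch the submitter is
 * do_nbd_request() running from kblockd and the worker is nbd_thread().
 * All names here are illustrative.  Build with: gcc -pthread handoff.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct req {
        int id;
        struct req *next;
};

static struct req *head, *tail;                 /* FIFO of pending requests */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t more = PTHREAD_COND_INITIALIZER;
static int stopping;

/* Producer: queue the request and return at once; never blocks on "I/O". */
static void queue_request(int id)
{
        struct req *r = calloc(1, sizeof(*r));

        if (!r)
                abort();
        r->id = id;
        pthread_mutex_lock(&lock);
        if (tail)
                tail->next = r;
        else
                head = r;
        tail = r;
        pthread_cond_signal(&more);
        pthread_mutex_unlock(&lock);
}

/* Consumer: the only thread that does the blocking work (the socket send). */
static void *worker(void *unused)
{
        (void)unused;
        for (;;) {
                struct req *r;

                pthread_mutex_lock(&lock);
                while (!head && !stopping)
                        pthread_cond_wait(&more, &lock);
                if (!head) {                    /* stopping and queue drained */
                        pthread_mutex_unlock(&lock);
                        return NULL;
                }
                r = head;
                head = r->next;
                if (!head)
                        tail = NULL;
                pthread_mutex_unlock(&lock);

                printf("sending request %d\n", r->id);  /* stand-in for the send */
                usleep(1000);
                free(r);
        }
}

int main(void)
{
        pthread_t t;
        int i;

        pthread_create(&t, NULL, worker, NULL);
        for (i = 0; i < 8; i++)
                queue_request(i);               /* cheap; the caller never blocks */

        pthread_mutex_lock(&lock);
        stopping = 1;                           /* analogous to kthread_stop() */
        pthread_cond_broadcast(&more);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        return 0;
}

The key property, both here and in the patch, is that the caller holding the queue lock never performs the network send itself, so it cannot deadlock against memory reclaim on a localhost connection.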

Patch originally from Laurent Vivier.
Signed-off-by: Paul Clements <paul.clements@steeleye.com>
Signed-off-by: Laurent Vivier <Laurent.Vivier@bull.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8c4dd606
drivers/block/nbd.c

@@ -29,6 +29,7 @@
 #include <linux/kernel.h>
 #include <net/sock.h>
 #include <linux/net.h>
+#include <linux/kthread.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -441,31 +442,11 @@ static void nbd_clear_que(struct nbd_device *lo)
        }
 }
 
-/*
- * We always wait for result of write, for now. It would be nice to make it optional
- * in future
- * if ((rq_data_dir(req) == WRITE) && (lo->flags & NBD_WRITE_NOCHK))
- *   { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
- */
-
-static void do_nbd_request(struct request_queue * q)
+static void nbd_handle_req(struct nbd_device *lo, struct request *req)
 {
-       struct request *req;
-
-       while ((req = elv_next_request(q)) != NULL) {
-               struct nbd_device *lo;
-
-               blkdev_dequeue_request(req);
-
-               dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
-                               req->rq_disk->disk_name, req, req->cmd_type);
-
                if (!blk_fs_request(req))
                        goto error_out;
 
-               lo = req->rq_disk->private_data;
-
-               BUG_ON(lo->magic != LO_MAGIC);
-
                nbd_cmd(req) = NBD_CMD_READ;
                if (rq_data_dir(req) == WRITE) {
                        nbd_cmd(req) = NBD_CMD_WRITE;
@@ -477,7 +458,6 @@ static void do_nbd_request(struct request_queue * q)
                }
 
                req->errors = 0;
-               spin_unlock_irq(q->queue_lock);
 
                mutex_lock(&lo->tx_lock);
                if (unlikely(!lo->sock)) {
@@ -486,8 +466,7 @@ static void do_nbd_request(struct request_queue * q)
                               lo->disk->disk_name);
                        req->errors++;
                        nbd_end_request(req);
-                       spin_lock_irq(q->queue_lock);
-                       continue;
+                       return;
                }
 
                lo->active_req = req;
@@ -507,14 +486,73 @@ static void do_nbd_request(struct request_queue * q)
 
                mutex_unlock(&lo->tx_lock);
                wake_up_all(&lo->active_wq);
+               return;
 
-               spin_lock_irq(q->queue_lock);
-               continue;
 error_out:
                req->errors++;
-               spin_unlock(q->queue_lock);
                nbd_end_request(req);
-               spin_lock(q->queue_lock);
+}
+
+static int nbd_thread(void *data)
+{
+       struct nbd_device *lo = data;
+       struct request *req;
+
+       set_user_nice(current, -20);
+       while (!kthread_should_stop() || !list_empty(&lo->waiting_queue)) {
+               /* wait for something to do */
+               wait_event_interruptible(lo->waiting_wq,
+                                        kthread_should_stop() ||
+                                        !list_empty(&lo->waiting_queue));
+
+               /* extract request */
+               if (list_empty(&lo->waiting_queue))
+                       continue;
+
+               spin_lock_irq(&lo->queue_lock);
+               req = list_entry(lo->waiting_queue.next, struct request,
+                                queuelist);
+               list_del_init(&req->queuelist);
+               spin_unlock_irq(&lo->queue_lock);
+
+               /* handle request */
+               nbd_handle_req(lo, req);
+       }
+       return 0;
+}
+
+/*
+ * We always wait for result of write, for now. It would be nice to make it optional
+ * in future
+ * if ((rq_data_dir(req) == WRITE) && (lo->flags & NBD_WRITE_NOCHK))
+ *   { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
+ */
+
+static void do_nbd_request(struct request_queue * q)
+{
+       struct request *req;
+
+       while ((req = elv_next_request(q)) != NULL) {
+               struct nbd_device *lo;
+
+               blkdev_dequeue_request(req);
+
+               spin_unlock_irq(q->queue_lock);
+
+               dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
+                               req->rq_disk->disk_name, req, req->cmd_type);
+
+               lo = req->rq_disk->private_data;
+
+               BUG_ON(lo->magic != LO_MAGIC);
+
+               spin_lock_irq(&lo->queue_lock);
+               list_add_tail(&req->queuelist, &lo->waiting_queue);
+               spin_unlock_irq(&lo->queue_lock);
+
+               wake_up(&lo->waiting_wq);
+
+               spin_lock_irq(q->queue_lock);
        }
 }
 
@@ -524,6 +562,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
        struct nbd_device *lo = inode->i_bdev->bd_disk->private_data;
        int error;
        struct request sreq ;
+       struct task_struct *thread;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
@@ -606,7 +645,12 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
        case NBD_DO_IT:
                if (!lo->file)
                        return -EINVAL;
+               thread = kthread_create(nbd_thread, lo, lo->disk->disk_name);
+               if (IS_ERR(thread))
+                       return PTR_ERR(thread);
+               wake_up_process(thread);
                error = nbd_do_it(lo);
+               kthread_stop(thread);
                if (error)
                        return error;
                sock_shutdown(lo, 1);
@@ -695,10 +739,12 @@ static int __init nbd_init(void)
                nbd_dev[i].file = NULL;
                nbd_dev[i].magic = LO_MAGIC;
                nbd_dev[i].flags = 0;
+               INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
                spin_lock_init(&nbd_dev[i].queue_lock);
                INIT_LIST_HEAD(&nbd_dev[i].queue_head);
                mutex_init(&nbd_dev[i].tx_lock);
                init_waitqueue_head(&nbd_dev[i].active_wq);
+               init_waitqueue_head(&nbd_dev[i].waiting_wq);
                nbd_dev[i].blksize = 1024;
                nbd_dev[i].bytesize = 0;
                disk->major = NBD_MAJOR;

include/linux/nbd.h

@@ -56,9 +56,11 @@ struct nbd_device {
        int magic;
 
        spinlock_t queue_lock;
-       struct list_head queue_head;    /* Requests are added here... */
+       struct list_head queue_head;    /* Requests waiting result */
        struct request *active_req;
        wait_queue_head_t active_wq;
+       struct list_head waiting_queue; /* Requests to be sent */
+       wait_queue_head_t waiting_wq;
 
        struct mutex tx_lock;
        struct gendisk *disk;