Commit acd042bb authored by Evgeniy Polyakov, committed by David S. Miller

[CONNECTOR]: async connector mode.

If the input message rate from userspace is too high, do not drop
messages, but try to deliver them through a dynamically allocated
work queue entry.

Failing that allocation acts as a kind of congestion control.

This also removes the WARN_ON() on that condition, which scared people.
Signed-off-by: Evgeniy Polyakov <johnpol@2ka.mipt.ru>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b9d717a7
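The heart of the patch is the fallback path added to cn_call_callback() below: when a callback's dedicated work entry is still pending, the message is no longer dropped with a WARN_ON(); instead a one-shot work item is allocated atomically and queued. Here is a sketch of that allocation pulled into a helper for readability — cn_alloc_oneshot() is a hypothetical name, not in the patch, but the field assignments mirror the real hunk:

#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/connector.h>

static struct work_struct *cn_alloc_oneshot(struct cn_callback_entry *cbq,
					    struct cn_msg *msg, void *data,
					    void (*destruct_data)(void *))
{
	struct work_struct *w;
	struct cn_callback_data *d;

	/* One allocation carries both the work item and its data. */
	w = kzalloc(sizeof(*w) + sizeof(*d), GFP_ATOMIC);
	if (!w)
		return NULL;	/* failing here is the congestion control */

	d = (struct cn_callback_data *)(w + 1);	/* data sits right after w */
	d->callback_priv = msg;
	d->callback = cbq->data.callback;
	d->ddata = data;
	d->destruct_data = destruct_data;
	d->free = w;	/* lets cn_queue_wrapper() kfree() the whole block */

	/* Open-coded INIT_WORK() against the 2.6.14-era work_struct. */
	INIT_LIST_HEAD(&w->entry);
	w->pending = 0;
	w->func = &cn_queue_wrapper;
	w->data = d;
	init_timer(&w->timer);

	return w;
}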
drivers/connector/cn_queue.c
@@ -31,16 +31,19 @@
 #include <linux/connector.h>
 #include <linux/delay.h>
 
-static void cn_queue_wrapper(void *data)
+void cn_queue_wrapper(void *data)
 {
-	struct cn_callback_entry *cbq = data;
+	struct cn_callback_data *d = data;
 
-	cbq->cb->callback(cbq->cb->priv);
-	cbq->destruct_data(cbq->ddata);
-	cbq->ddata = NULL;
+	d->callback(d->callback_priv);
+
+	d->destruct_data(d->ddata);
+	d->ddata = NULL;
+
+	kfree(d->free);
 }
 
-static struct cn_callback_entry *cn_queue_alloc_callback_entry(struct cn_callback *cb)
+static struct cn_callback_entry *cn_queue_alloc_callback_entry(char *name, struct cb_id *id, void (*callback)(void *))
 {
 	struct cn_callback_entry *cbq;
@@ -50,8 +53,11 @@ static struct cn_callback_entry *cn_queue_alloc_callback_entry(struct cn_callbac
 		return NULL;
 	}
 
-	cbq->cb = cb;
-	INIT_WORK(&cbq->work, &cn_queue_wrapper, cbq);
+	snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
+	memcpy(&cbq->id.id, id, sizeof(struct cb_id));
+	cbq->data.callback = callback;
+
+	INIT_WORK(&cbq->work, &cn_queue_wrapper, &cbq->data);
 	return cbq;
 }
@@ -68,12 +74,12 @@ int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
 	return ((i1->idx == i2->idx) && (i1->val == i2->val));
 }
 
-int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb)
+int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *))
 {
 	struct cn_callback_entry *cbq, *__cbq;
 	int found = 0;
 
-	cbq = cn_queue_alloc_callback_entry(cb);
+	cbq = cn_queue_alloc_callback_entry(name, id, callback);
 	if (!cbq)
 		return -ENOMEM;
@@ -82,7 +88,7 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb)
 	spin_lock_bh(&dev->queue_lock);
 	list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
-		if (cn_cb_equal(&__cbq->cb->id, &cb->id)) {
+		if (cn_cb_equal(&__cbq->id.id, id)) {
 			found = 1;
 			break;
 		}
@@ -99,7 +105,7 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb)
 	cbq->nls = dev->nls;
 	cbq->seq = 0;
-	cbq->group = cbq->cb->id.idx;
+	cbq->group = cbq->id.id.idx;
 
 	return 0;
 }
@@ -111,7 +117,7 @@ void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
 	spin_lock_bh(&dev->queue_lock);
 	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
-		if (cn_cb_equal(&cbq->cb->id, id)) {
+		if (cn_cb_equal(&cbq->id.id, id)) {
 			list_del(&cbq->callback_entry);
 			found = 1;
 			break;
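One thing worth calling out in the cn_queue.c half: the reworked wrapper serves both delivery paths. An annotated copy of the new function (the comments are editorial, not from the patch):

void cn_queue_wrapper(void *data)
{
	struct cn_callback_data *d = data;

	d->callback(d->callback_priv);	/* invoke the registered callback */

	d->destruct_data(d->ddata);	/* e.g. kfree_skb() on the netlink skb */
	d->ddata = NULL;

	kfree(d->free);			/* NULL (a no-op) for the entry embedded
					 * in a kzalloc()ed cn_callback_entry;
					 * the combined block for the async path */
}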
drivers/connector/connector.c
@@ -84,7 +84,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask)
 	spin_lock_bh(&dev->cbdev->queue_lock);
 	list_for_each_entry(__cbq, &dev->cbdev->queue_list,
 			    callback_entry) {
-		if (cn_cb_equal(&__cbq->cb->id, &msg->id)) {
+		if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
 			found = 1;
 			group = __cbq->group;
 		}
@@ -127,42 +127,56 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
 {
 	struct cn_callback_entry *__cbq;
 	struct cn_dev *dev = &cdev;
-	int found = 0;
+	int err = -ENODEV;
 
 	spin_lock_bh(&dev->cbdev->queue_lock);
 	list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
-		if (cn_cb_equal(&__cbq->cb->id, &msg->id)) {
-			/*
-			 * Let's scream if there is some magic and the
-			 * data will arrive asynchronously here.
-			 * [i.e. netlink messages will be queued].
-			 * After the first warning I will fix it
-			 * quickly, but now I think it is
-			 * impossible. --zbr (2004_04_27).
-			 */
+		if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
 			if (likely(!test_bit(0, &__cbq->work.pending) &&
-					__cbq->ddata == NULL)) {
-				__cbq->cb->priv = msg;
-				__cbq->ddata = data;
-				__cbq->destruct_data = destruct_data;
+					__cbq->data.ddata == NULL)) {
+				__cbq->data.callback_priv = msg;
+				__cbq->data.ddata = data;
+				__cbq->data.destruct_data = destruct_data;
 
 				if (queue_work(dev->cbdev->cn_queue,
 					       &__cbq->work))
-					found = 1;
+					err = 0;
 			} else {
-				printk("%s: cbq->data=%p, "
-				       "work->pending=%08lx.\n",
-				       __func__, __cbq->ddata,
-				       __cbq->work.pending);
-				WARN_ON(1);
+				struct work_struct *w;
+				struct cn_callback_data *d;
+
+				w = kzalloc(sizeof(*w) + sizeof(*d), GFP_ATOMIC);
+				if (w) {
+					d = (struct cn_callback_data *)(w+1);
+
+					d->callback_priv = msg;
+					d->callback = __cbq->data.callback;
+					d->ddata = data;
+					d->destruct_data = destruct_data;
+					d->free = w;
+
+					INIT_LIST_HEAD(&w->entry);
+					w->pending = 0;
+					w->func = &cn_queue_wrapper;
+					w->data = d;
+					init_timer(&w->timer);
+
+					if (queue_work(dev->cbdev->cn_queue, w))
+						err = 0;
+					else {
+						kfree(w);
+						err = -EINVAL;
+					}
+				} else
+					err = -ENOMEM;
 			}
 			break;
 		}
 	}
 	spin_unlock_bh(&dev->cbdev->queue_lock);
 
-	return found ? 0 : -ENODEV;
+	return err;
 }
 
 /*
@@ -291,22 +305,10 @@ int cn_add_callback(struct cb_id *id, char *name, void (*callback)(void *))
 {
 	int err;
 	struct cn_dev *dev = &cdev;
-	struct cn_callback *cb;
-
-	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
-	if (!cb)
-		return -ENOMEM;
-
-	scnprintf(cb->name, sizeof(cb->name), "%s", name);
-
-	memcpy(&cb->id, id, sizeof(cb->id));
-	cb->callback = callback;
-
-	err = cn_queue_add_callback(dev->cbdev, cb);
-	if (err) {
-		kfree(cb);
+	err = cn_queue_add_callback(dev->cbdev, name, id, callback);
+	if (err)
 		return err;
-	}
 
 	cn_notify(id, 0);
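Since cn_queue_add_callback() now takes the name/id/callback triple directly, users of the connector no longer build a struct cn_callback themselves. A minimal, hypothetical module registering against the new interface (the id values and names below are made up for illustration):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/connector.h>

/* Hypothetical ids; real users pick their own idx/val pair. */
static struct cb_id example_id = { .idx = 0x1, .val = 0x1 };

static void example_callback(void *data)
{
	struct cn_msg *msg = data;	/* payload is msg->data, msg->len bytes */

	printk(KERN_INFO "example: message %u.%u, len %u.\n",
	       msg->id.idx, msg->id.val, msg->len);
}

static int __init example_init(void)
{
	/* one call now allocates, names and enqueues the callback entry */
	return cn_add_callback(&example_id, "example", example_callback);
}

static void __exit example_exit(void)
{
	cn_del_callback(&example_id);
}

module_init(example_init);
module_exit(example_exit);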
include/linux/connector.h
@@ -104,12 +104,19 @@ struct cn_queue_dev {
 	struct sock *nls;
 };
 
-struct cn_callback {
+struct cn_callback_id {
 	unsigned char name[CN_CBQ_NAMELEN];
-
 	struct cb_id id;
+};
+
+struct cn_callback_data {
+	void (*destruct_data) (void *);
+	void *ddata;
+
+	void *callback_priv;
 	void (*callback) (void *);
-	void *priv;
+
+	void *free;
 };
 
 struct cn_callback_entry {
@@ -118,8 +125,8 @@ struct cn_callback_entry {
 	struct work_struct work;
 	struct cn_queue_dev *pdev;
 
-	void (*destruct_data) (void *);
-	void *ddata;
+	struct cn_callback_id id;
+	struct cn_callback_data data;
 
 	int seq, group;
 	struct sock *nls;
@@ -144,7 +151,7 @@ int cn_add_callback(struct cb_id *, char *, void (*callback) (void *));
 void cn_del_callback(struct cb_id *);
 int cn_netlink_send(struct cn_msg *, u32, int);
 
-int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb);
+int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *));
 void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
 
 struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *);
@@ -152,6 +159,8 @@ void cn_queue_free_dev(struct cn_queue_dev *dev);
 
 int cn_cb_equal(struct cb_id *, struct cb_id *);
 
+void cn_queue_wrapper(void *data);
+
 extern int cn_already_initialized;
 
 #endif /* __KERNEL__ */
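A closing note on the header changes: splitting the old struct cn_callback into cn_callback_id and cn_callback_data is what makes the async path work. cn_queue_wrapper() now only ever sees a struct cn_callback_data, so it neither knows nor cares whether it was handed the copy embedded in a registered cn_callback_entry or the free-standing one built under GFP_ATOMIC in cn_call_callback(); everything the wrapper must touch, including the free pointer, travels with the data itself. Declaring cn_queue_wrapper() in the header (it was static before) is what lets connector.c point the one-shot work item at it.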