path: root/drivers/block/drbd/drbd_worker.c
author     Lars Ellenberg <lars.ellenberg@linbit.com>     2014-04-28 11:43:21 +0200
committer  Philipp Reisner <philipp.reisner@linbit.com>   2014-07-10 18:35:07 +0200
commit     15e26f6a3c6de2c665b4a30b9a70a902111f281f (patch)
tree       d8050e5998e46e3f6f541b3941dd727714ae07ba  /drivers/block/drbd/drbd_worker.c
parent     7f34f61490ee87a470cf229069d59b0987f42a59 (diff)
drbd: add drbd_queue_work_if_unqueued helper
We sometimes do

    if (list_empty(&w.list))
        drbd_queue_work(&q, &w);

Removal (list_del_init) may happen outside all locks, after all pending
work entries have been moved to an on-stack local work list.

For work structs that are not dynamically allocated but embedded, we must
avoid re-adding them until they really have been removed.

Move that list_empty check inside the spin_lock(&q->q_lock) within the
helper function, and change it to list_empty_careful().

This may have been the reason for a list_add corruption inside
drbd_queue_work().

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
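The helper itself does not appear in the diff shown here (the diffstat is
limited to drbd_worker.c), so the following is only a sketch of what the
commit message describes, reconstructed from how the work queue is used in
this file; the field names q, q_lock, q_wait and the final wake_up are
assumptions, not taken from this diff:

    /* Sketch only: check-and-queue under q_lock, as described above. */
    static inline void
    drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&q->q_lock, flags);
    	/* The work may be list_del_init()ed from an on-stack batch list
    	 * outside of q_lock; list_empty_careful() tolerates that racing
    	 * removal, a plain list_empty() would not. */
    	if (list_empty_careful(&w->list))
    		list_add_tail(&w->list, &q->q);
    	spin_unlock_irqrestore(&q->q_lock, flags);
    	wake_up(&q->q_wait);	/* assumed: same wakeup as drbd_queue_work() */
    }

Keeping the check and the list_add under the same q_lock closes the race in
which a work struct is queued twice before the first queuing is consumed.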
Diffstat (limited to 'drivers/block/drbd/drbd_worker.c')
-rw-r--r--  drivers/block/drbd/drbd_worker.c  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 9f0acb011f30..49b88731b349 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -452,9 +452,9 @@ void resync_timer_fn(unsigned long data)
 {
 	struct drbd_device *device = (struct drbd_device *) data;
 
-	if (list_empty(&device->resync_work.list))
-		drbd_queue_work(&first_peer_device(device)->connection->sender_work,
-				&device->resync_work);
+	drbd_queue_work_if_unqueued(
+		&first_peer_device(device)->connection->sender_work,
+		&device->resync_work);
 }
 
 static void fifo_set(struct fifo_buffer *fb, int value)
@@ -1968,7 +1968,7 @@ static void do_unqueued_work(struct drbd_connection *connection)
 static bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_list)
 {
 	spin_lock_irq(&queue->q_lock);
-	list_splice_init(&queue->q, work_list);
+	list_splice_tail_init(&queue->q, work_list);
 	spin_unlock_irq(&queue->q_lock);
 	return !list_empty(work_list);
 }
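For context on why list_empty_careful() is needed at all: the worker drains
the queue by splicing it onto an on-stack list via dequeue_work_batch()
above, and the entries are then list_del_init()ed with no q_lock held. A
simplified, hypothetical consumer loop is sketched below; process_sender_work()
and the cb(w, 0) call are illustrative, not the actual drbd_worker() code:

    /* Hypothetical consumer loop, for illustration only. */
    static void process_sender_work(struct drbd_work_queue *queue)
    {
    	LIST_HEAD(work_list);
    	struct drbd_work *w;

    	/* list_splice_tail_init() above empties the queue under q_lock
    	 * while preserving FIFO order on the on-stack list. */
    	if (!dequeue_work_batch(queue, &work_list))
    		return;

    	while (!list_empty(&work_list)) {
    		w = list_first_entry(&work_list, struct drbd_work, list);
    		/* Removal happens here, outside all locks; this is the
    		 * window drbd_queue_work_if_unqueued() must cope with. */
    		list_del_init(&w->list);
    		w->cb(w, 0);
    	}
    }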