| field | value | date |
|---|---|---|
| author | Lars-Peter Clausen <lars@metafoo.de> | 2013-03-07 19:53:00 +0000 |
| committer | Jonathan Cameron <jic23@kernel.org> | 2013-03-17 20:04:45 +0000 |
| commit | 1b9dc91e41e07125ef2ce7d0a8dde93ce3eaf414 | |
| tree | 7623bf51b752090cf4f6f4266670a56adbdc40ec /drivers/iio | |
| parent | b9606e2aa97d3d831d1236c0e789a33a2f867a8a | |
iio: events: Make iio_push_event() IRQ context safe
Currently it is not safe to call iio_push_event() from hard IRQ context, since
the IIO event code uses spin_lock()/spin_unlock() and it is not safe to mix
calls to spin_lock()/spin_unlock() from different contexts on the same lock.
E.g. if the lock is held in iio_event_chrdev_read() and an interrupt kicks in
and the interrupt handler calls iio_push_event(), we end up with a deadlock.
This patch updates iio_push_event() to use spin_lock_irqsave()/
spin_unlock_irqrestore(), since it can be called from both IRQ and non-IRQ
context. All other users of the lock, which always run in non-IRQ context,
are updated to spin_lock_irq()/spin_unlock_irq().
Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Jonathan Cameron <jic23@kernel.org>
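For context, here is a minimal sketch (not taken from this patch) of the locking rule the change follows: any path that may run in hard IRQ context takes the lock with spin_lock_irqsave(), while process-context paths that know interrupts are enabled can use spin_lock_irq(). The demo_* names are hypothetical.

```c
/*
 * Hypothetical sketch of the locking rule used by this patch: a lock
 * that can be taken from hard IRQ context must be acquired with
 * spin_lock_irqsave() there, and with spin_lock_irq() in process
 * context, so an interrupt can never spin on a lock already held by
 * the code it interrupted.
 */
#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_pending;

/* Process context (e.g. a read() handler): IRQs are known to be enabled. */
static int demo_consume(void)
{
	int val;

	spin_lock_irq(&demo_lock);	/* disables local IRQs */
	val = demo_pending;
	demo_pending = 0;
	spin_unlock_irq(&demo_lock);	/* unconditionally re-enables them */

	return val;
}

/* May run in hard IRQ context: must save and restore the IRQ state. */
static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	demo_pending++;
	spin_unlock_irqrestore(&demo_lock, flags);

	return IRQ_HANDLED;
}
```

With the old plain spin_lock() in the process-context path, an interrupt could arrive on the same CPU while the lock was held, and a handler calling into the same lock would spin forever, which is exactly the deadlock described above.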
Diffstat (limited to 'drivers/iio')
-rw-r--r-- | drivers/iio/industrialio-event.c | 29 |
1 file changed, 15 insertions, 14 deletions
```diff
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 261cae00557e..10aa9ef86cec 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -46,10 +46,11 @@ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
 {
 	struct iio_event_interface *ev_int = indio_dev->event_interface;
 	struct iio_event_data ev;
+	unsigned long flags;
 	int copied;
 
 	/* Does anyone care? */
-	spin_lock(&ev_int->wait.lock);
+	spin_lock_irqsave(&ev_int->wait.lock, flags);
 	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
 
 		ev.id = ev_code;
@@ -59,7 +60,7 @@ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
 		if (copied != 0)
 			wake_up_locked_poll(&ev_int->wait, POLLIN);
 	}
-	spin_unlock(&ev_int->wait.lock);
+	spin_unlock_irqrestore(&ev_int->wait.lock, flags);
 
 	return 0;
 }
@@ -76,10 +77,10 @@ static unsigned int iio_event_poll(struct file *filep,
 
 	poll_wait(filep, &ev_int->wait, wait);
 
-	spin_lock(&ev_int->wait.lock);
+	spin_lock_irq(&ev_int->wait.lock);
 	if (!kfifo_is_empty(&ev_int->det_events))
 		events = POLLIN | POLLRDNORM;
-	spin_unlock(&ev_int->wait.lock);
+	spin_unlock_irq(&ev_int->wait.lock);
 
 	return events;
 }
@@ -96,14 +97,14 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
 	if (count < sizeof(struct iio_event_data))
 		return -EINVAL;
 
-	spin_lock(&ev_int->wait.lock);
+	spin_lock_irq(&ev_int->wait.lock);
 	if (kfifo_is_empty(&ev_int->det_events)) {
 		if (filep->f_flags & O_NONBLOCK) {
 			ret = -EAGAIN;
 			goto error_unlock;
 		}
 		/* Blocking on device; waiting for something to be there */
-		ret = wait_event_interruptible_locked(ev_int->wait,
+		ret = wait_event_interruptible_locked_irq(ev_int->wait,
 					!kfifo_is_empty(&ev_int->det_events));
 		if (ret)
 			goto error_unlock;
@@ -113,7 +114,7 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
 	ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
 
 error_unlock:
-	spin_unlock(&ev_int->wait.lock);
+	spin_unlock_irq(&ev_int->wait.lock);
 
 	return ret ? ret : copied;
 }
@@ -122,7 +123,7 @@ static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
 {
 	struct iio_event_interface *ev_int = filep->private_data;
 
-	spin_lock(&ev_int->wait.lock);
+	spin_lock_irq(&ev_int->wait.lock);
 	__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
 	/*
 	 * In order to maintain a clean state for reopening,
@@ -130,7 +131,7 @@ static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
 	 * any new __iio_push_event calls running.
 	 */
 	kfifo_reset_out(&ev_int->det_events);
-	spin_unlock(&ev_int->wait.lock);
+	spin_unlock_irq(&ev_int->wait.lock);
 
 	return 0;
 }
@@ -151,18 +152,18 @@ int iio_event_getfd(struct iio_dev *indio_dev)
 	if (ev_int == NULL)
 		return -ENODEV;
 
-	spin_lock(&ev_int->wait.lock);
+	spin_lock_irq(&ev_int->wait.lock);
 	if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
-		spin_unlock(&ev_int->wait.lock);
+		spin_unlock_irq(&ev_int->wait.lock);
 		return -EBUSY;
 	}
-	spin_unlock(&ev_int->wait.lock);
+	spin_unlock_irq(&ev_int->wait.lock);
 	fd = anon_inode_getfd("iio:event",
 				&iio_event_chrdev_fileops, ev_int, O_RDONLY);
 	if (fd < 0) {
-		spin_lock(&ev_int->wait.lock);
+		spin_lock_irq(&ev_int->wait.lock);
 		__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
-		spin_unlock(&ev_int->wait.lock);
+		spin_unlock_irq(&ev_int->wait.lock);
 	}
 	return fd;
 }
```
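As a usage illustration, the sketch below shows what the change enables: a driver pushing an event straight from a hard IRQ handler. This is hypothetical driver code, not part of this commit; the foo_* names are made up, and note that iio_get_time_ns() of this kernel era takes no argument (newer kernels pass indio_dev).

```c
/*
 * Hypothetical driver fragment: push an IIO event from hard IRQ context,
 * which this patch makes safe.
 */
#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/events.h>

static irqreturn_t foo_event_handler(int irq, void *private)
{
	struct iio_dev *indio_dev = private;

	/*
	 * Safe in hard IRQ context now that iio_push_event() takes the
	 * wait-queue lock with spin_lock_irqsave() internally.
	 */
	iio_push_event(indio_dev,
		       IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0,
					    IIO_EV_TYPE_THRESH,
					    IIO_EV_DIR_RISING),
		       iio_get_time_ns());

	return IRQ_HANDLED;
}
```

Before this change, such a handler would have had to defer the call to a threaded handler or workqueue to avoid mixing lock contexts.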