Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r--  fs/eventpoll.c  7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 77b9953624f4..34f68f3a069a 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -325,15 +325,14 @@ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq)
 	int wake_nests = 0;
 	unsigned long flags;
 	struct task_struct *this_task = current;
-	struct list_head *lsthead = &psw->wake_task_list, *lnk;
+	struct list_head *lsthead = &psw->wake_task_list;
 	struct wake_task_node *tncur;
 	struct wake_task_node tnode;
 
 	spin_lock_irqsave(&psw->lock, flags);
 
 	/* Try to see if the current task is already inside this wakeup call */
-	list_for_each(lnk, lsthead) {
-		tncur = list_entry(lnk, struct wake_task_node, llink);
+	list_for_each_entry(tncur, lsthead, llink) {
 
 		if (tncur->wq == wq ||
 		    (tncur->task == this_task && ++wake_nests > EP_MAX_POLLWAKE_NESTS)) {
@@ -463,7 +462,7 @@ static void ep_free(struct eventpoll *ep)
 	 * holding "epmutex" we can be sure that no file cleanup code will hit
 	 * us during this operation. So we can avoid the lock on "ep->lock".
 	 */
-	while ((rbp = rb_first(&ep->rbr)) != 0) {
+	while ((rbp = rb_first(&ep->rbr)) != NULL) {
 		epi = rb_entry(rbp, struct epitem, rbn);
 		ep_remove(ep, epi);
 	}
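
For reference, the first hunk swaps an open-coded list_for_each()/list_entry() pair for list_for_each_entry(), which hands the loop body the containing object directly and drops the scratch "struct list_head *lnk" cursor. Below is a minimal userspace sketch of that idiom, assuming simplified stand-ins for the kernel's <linux/list.h> helpers (list_head, container_of, list_add_tail); struct wake_task_node is reduced to a toy payload here and is not the kernel definition.

#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_entry(ptr, type, member) container_of(ptr, type, member)

/* The open-coded form the patch removes: iterate raw nodes, convert by hand. */
#define list_for_each(pos, head) \
	for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

/* The form the patch adopts: the macro yields the containing object itself. */
#define list_for_each_entry(pos, head, member)                               \
	for ((pos) = list_entry((head)->next, typeof(*(pos)), member);       \
	     &(pos)->member != (head);                                        \
	     (pos) = list_entry((pos)->member.next, typeof(*(pos)), member))

struct wake_task_node {
	struct list_head llink;
	int id;		/* toy payload; the real struct holds task/wq pointers */
};

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

int main(void)
{
	struct list_head head = LIST_HEAD_INIT(head);
	struct wake_task_node a = { .id = 1 }, b = { .id = 2 };
	struct wake_task_node *tncur;

	list_add_tail(&a.llink, &head);
	list_add_tail(&b.llink, &head);

	/* No scratch list_head cursor and no list_entry() call in the body. */
	list_for_each_entry(tncur, &head, llink)
		printf("node %d\n", tncur->id);

	return 0;
}

Built with gcc (the macro relies on the GNU typeof extension, as the kernel's own does), this prints both node ids without the loop body ever naming a raw list node.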
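
The second hunk is purely notational: rb_first() returns a struct rb_node pointer, so testing it against NULL states the intent more plainly than comparing a pointer with 0. The surrounding loop is the usual drain pattern, which the following hypothetical sketch reproduces in userspace C with a toy singly linked stack standing in for the kernel rbtree (first(), remove_node(), and push() are invented names for illustration).

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int val;
};

static struct node *head;

/* Hypothetical stand-in for rb_first(): first element, or NULL if empty. */
static struct node *first(void)
{
	return head;
}

/* Hypothetical stand-in for ep_remove(): detach and free one element. */
static void remove_node(struct node *n)
{
	head = n->next;
	free(n);
}

static void push(int val)
{
	struct node *n = malloc(sizeof(*n));

	n->val = val;
	n->next = head;
	head = n;
}

int main(void)
{
	push(1);
	push(2);
	push(3);

	struct node *n;

	/*
	 * Same shape as the ep_free() loop: fetch the first element, test
	 * the returned pointer against NULL, remove it, and repeat; each
	 * removal makes a different element the "first" one.
	 */
	while ((n = first()) != NULL) {
		printf("removing %d\n", n->val);
		remove_node(n);
	}

	return 0;
}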