author	David S. Miller <davem@davemloft.net>	2008-07-31 16:58:50 -0700
committer	David S. Miller <davem@davemloft.net>	2008-07-31 16:58:50 -0700
commit	c3f26a269c2421f97f10cf8ed05d5099b573af4d (patch)
tree	d0602cbb48742b3e39ab6bdcaa08c342d4cd2cae /include/linux
parent	967ab999a090b1a4e7d3c7febfd6d89b42fb4cf4 (diff)
netdev: Fix lockdep warnings in multiqueue configurations.
When support for multiple TX queues was added, the netif_tx_lock() routines were converted to iterate over all TX queues and grab each queue's spinlock. This causes heartburn for lockdep, and it's not a healthy thing to do with lots of TX queues anyway.

So modify this to use a top-level lock and a "frozen" state for the individual TX queues.

Signed-off-by: David S. Miller <davem@davemloft.net>
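To illustrate the idea outside the kernel, here is a minimal userspace sketch of the same pattern; struct queue, tx_freeze_all() and tx_one() are invented stand-ins for the netdev_queue/_xmit_lock machinery, not kernel code. The freezer serializes on a single top-level lock and takes each per-queue lock only long enough to publish a "frozen" bit; transmitters take only their own queue's lock and back off when they see the bit.

	/* sketch.c: userspace analogue of the freeze pattern.
	 * Build: cc -std=c11 -pthread -c sketch.c
	 * All names here are illustrative stand-ins, not kernel APIs. */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	#define NUM_QUEUES 4

	struct queue {
		pthread_mutex_t xmit_lock;  /* stands in for txq->_xmit_lock */
		atomic_bool frozen;         /* stands in for __QUEUE_STATE_FROZEN */
	};

	static pthread_mutex_t tx_global_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct queue queues[NUM_QUEUES];

	static void tx_queues_init(void)
	{
		for (int i = 0; i < NUM_QUEUES; i++) {
			pthread_mutex_init(&queues[i].xmit_lock, NULL);
			atomic_init(&queues[i].frozen, false);
		}
	}

	/* Freeze all queues.  Only tx_global_lock is held across the loop;
	 * each per-queue lock is taken just long enough to synchronize with
	 * transmitters that already sampled the frozen bit, so no code path
	 * ever holds more than two locks at once. */
	static void tx_freeze_all(void)
	{
		pthread_mutex_lock(&tx_global_lock);
		for (int i = 0; i < NUM_QUEUES; i++) {
			pthread_mutex_lock(&queues[i].xmit_lock);
			atomic_store(&queues[i].frozen, true);
			pthread_mutex_unlock(&queues[i].xmit_lock);
		}
	}

	static void tx_unfreeze_all(void)
	{
		for (int i = 0; i < NUM_QUEUES; i++)
			atomic_store(&queues[i].frozen, false);
		pthread_mutex_unlock(&tx_global_lock);
	}

	/* Transmit path: take only this queue's lock, then back off if a
	 * freeze is in progress; the caller requeues and retries later. */
	static bool tx_one(struct queue *q)
	{
		pthread_mutex_lock(&q->xmit_lock);
		if (atomic_load(&q->frozen)) {
			pthread_mutex_unlock(&q->xmit_lock);
			return false;
		}
		/* ... hand the frame to the device here ... */
		pthread_mutex_unlock(&q->xmit_lock);
		return true;
	}

Contrast this with the old scheme, where netif_tx_lock() held all N queue locks simultaneously, which is exactly the nesting lockdep complained about.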
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/netdevice.h	86
1 file changed, 55 insertions(+), 31 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b4d056ceab96..ee583f642a9f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -440,6 +440,7 @@ static inline void napi_synchronize(const struct napi_struct *n)
 enum netdev_queue_state_t
 {
 	__QUEUE_STATE_XOFF,
+	__QUEUE_STATE_FROZEN,
 };
 
 struct netdev_queue {
@@ -636,7 +637,7 @@ struct net_device
 	unsigned int		real_num_tx_queues;
 
 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
-
+	spinlock_t		tx_global_lock;
 	/*
 	 * One part is mostly used on xmit path (device)
 	 */
@@ -1099,6 +1100,11 @@ static inline int netif_queue_stopped(const struct net_device *dev)
 	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
+static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
+{
+	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
+}
+
 /**
  *	netif_running - test if up
  *	@dev: network device
@@ -1475,6 +1481,26 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 	txq->xmit_lock_owner = smp_processor_id();
 }
 
+static inline int __netif_tx_trylock(struct netdev_queue *txq)
+{
+	int ok = spin_trylock(&txq->_xmit_lock);
+	if (likely(ok))
+		txq->xmit_lock_owner = smp_processor_id();
+	return ok;
+}
+
+static inline void __netif_tx_unlock(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = -1;
+	spin_unlock(&txq->_xmit_lock);
+}
+
+static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = -1;
+	spin_unlock_bh(&txq->_xmit_lock);
+}
+
 /**
  *	netif_tx_lock - grab network device transmit lock
  *	@dev: network device
@@ -1484,12 +1510,23 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
  */
 static inline void netif_tx_lock(struct net_device *dev)
 {
-	int cpu = smp_processor_id();
 	unsigned int i;
+	int cpu;
 
+	spin_lock(&dev->tx_global_lock);
+	cpu = smp_processor_id();
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+		/* We are the only thread of execution doing a
+		 * freeze, but we have to grab the _xmit_lock in
+		 * order to synchronize with threads which are in
+		 * the ->hard_start_xmit() handler and already
+		 * checked the frozen bit.
+		 */
 		__netif_tx_lock(txq, cpu);
+		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
+		__netif_tx_unlock(txq);
 	}
 }
 
@@ -1499,40 +1536,22 @@ static inline void netif_tx_lock_bh(struct net_device *dev)
 	netif_tx_lock(dev);
 }
 
-static inline int __netif_tx_trylock(struct netdev_queue *txq)
-{
-	int ok = spin_trylock(&txq->_xmit_lock);
-	if (likely(ok))
-		txq->xmit_lock_owner = smp_processor_id();
-	return ok;
-}
-
-static inline int netif_tx_trylock(struct net_device *dev)
-{
-	return __netif_tx_trylock(netdev_get_tx_queue(dev, 0));
-}
-
-static inline void __netif_tx_unlock(struct netdev_queue *txq)
-{
-	txq->xmit_lock_owner = -1;
-	spin_unlock(&txq->_xmit_lock);
-}
-
-static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
-{
-	txq->xmit_lock_owner = -1;
-	spin_unlock_bh(&txq->_xmit_lock);
-}
-
 static inline void netif_tx_unlock(struct net_device *dev)
 {
 	unsigned int i;
 
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		__netif_tx_unlock(txq);
-	}
+
+		/* No need to grab the _xmit_lock here.  If the
+		 * queue is not stopped for another reason, we
+		 * force a schedule.
+		 */
+		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
+		if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+			__netif_schedule(txq->qdisc);
+	}
+	spin_unlock(&dev->tx_global_lock);
 }
 
 static inline void netif_tx_unlock_bh(struct net_device *dev)
@@ -1556,13 +1575,18 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
 static inline void netif_tx_disable(struct net_device *dev)
 {
 	unsigned int i;
+	int cpu;
 
-	netif_tx_lock_bh(dev);
+	local_bh_disable();
+	cpu = smp_processor_id();
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+		__netif_tx_lock(txq, cpu);
 		netif_tx_stop_queue(txq);
+		__netif_tx_unlock(txq);
 	}
-	netif_tx_unlock_bh(dev);
+	local_bh_enable();
 }
 
 static inline void netif_addr_lock(struct net_device *dev)
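For completeness, a hypothetical caller-side sketch (not part of this patch) of how a transmit path composes the new helpers: lock only the queue at hand, then back off if a freezer has already published the frozen bit. xmit_one_frame() is an invented stand-in for handing one frame to the driver.

	static int try_xmit_one(struct net_device *dev, struct netdev_queue *txq)
	{
		int done = 0;

		if (__netif_tx_trylock(txq)) {
			/* A freezer may have set the frozen bit before we
			 * took the lock; if so, leave the hardware alone. */
			if (!netif_tx_queue_frozen(txq) &&
			    !netif_tx_queue_stopped(txq))
				done = xmit_one_frame(dev, txq);	/* stand-in */
			__netif_tx_unlock(txq);
		}
		/* On a frozen or stopped queue the caller requeues the
		 * frame; netif_tx_unlock() will __netif_schedule() the
		 * qdisc once the freeze ends, so nothing is lost. */
		return done;
	}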