| author | Jakub Kicinski <kuba@kernel.org> | 2025-07-24 18:34:59 -0700 |
|---|---|---|
| committer | Jakub Kicinski <kuba@kernel.org> | 2025-07-24 18:34:59 -0700 |
| commit | 89628a0ec78718481d75d6a5d49c47862cd28d44 (patch) | |
| tree | d8223d0f701032f1979ed7cbbc2b6b8ad8407864 /net/core/dev.c | |
| parent | d2002ccb47dd3bf6102d06c8e5062ccfdd31ce28 (diff) | |
| parent | 8e7583a4f65f3dbf3e8deb4e60f3679c276bef62 (diff) | |
Merge branch 'use-enum-to-represent-the-napi-threaded-state'
Samiullah Khawaja says:
====================
Use enum to represent the NAPI threaded state
Instead of using 0/1 to represent the NAPI threaded states, use an enum (disabled/enabled) to represent them.
This patch series is a subset of the patches in the following larger series:
https://lore.kernel.org/20250718232052.1266188-1-skhawaja@google.com
Per the feedback, the first 3 patches, which replace the use of 0/1 for the NAPI threaded states with an enum, are being sent separately. See:
https://lore.kernel.org/20250721164856.1d2208e4@kernel.org
====================
Link: https://patch.msgid.link/20250723013031.2911384-1-skhawaja@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
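For reference, the enum that replaces the bool flag looks roughly like the sketch below. This is inferred from the identifiers and changed prototypes in the diff further down; the exact header location and values in the kernel tree are an assumption here, not quoted from this patch.

```c
/* Sketch of the NAPI threaded-state enum implied by this series.
 * The enumerator names come from the diff below; treating DISABLED as 0
 * (so existing `if (threaded)` checks keep working) is an assumption.
 */
enum netdev_napi_threaded {
	NETDEV_NAPI_THREADED_DISABLED = 0,
	NETDEV_NAPI_THREADED_ENABLED  = 1,
};

/* The setters in net/core/dev.c now take the enum instead of a bool: */
int napi_set_threaded(struct napi_struct *napi,
		      enum netdev_napi_threaded threaded);
int netif_set_threaded(struct net_device *dev,
		       enum netdev_napi_threaded threaded);
```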
Diffstat (limited to 'net/core/dev.c')
| -rw-r--r-- | net/core/dev.c | 37 |
1 file changed, 26 insertions, 11 deletions
```diff
diff --git a/net/core/dev.c b/net/core/dev.c
index 354d3453b407..1c6e755841ce 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6578,8 +6578,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
 	 * it, we need to bound somehow the time packets are kept in
 	 * the GRO layer.
 	 */
-	gro_flush(&n->gro, !!timeout);
-	gro_normal_list(&n->gro);
+	gro_flush_normal(&n->gro, !!timeout);
 
 	if (unlikely(!list_empty(&n->poll_list))) {
 		/* If n->poll_list is not empty, we need to mask irqs */
@@ -6649,8 +6648,7 @@ static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
 	}
 
 	/* Flush too old packets. If HZ < 1000, flush all packets */
-	gro_flush(&napi->gro, HZ >= 1000);
-	gro_normal_list(&napi->gro);
+	gro_flush_normal(&napi->gro, HZ >= 1000);
 
 	clear_bit(NAPI_STATE_SCHED, &napi->state);
 }
@@ -6965,7 +6963,8 @@ static void napi_stop_kthread(struct napi_struct *napi)
 	napi->thread = NULL;
 }
 
-int napi_set_threaded(struct napi_struct *napi, bool threaded)
+int napi_set_threaded(struct napi_struct *napi,
+		      enum netdev_napi_threaded threaded)
 {
 	if (threaded) {
 		if (!napi->thread) {
@@ -6990,7 +6989,8 @@ int napi_set_threaded(struct napi_struct *napi, bool threaded)
 	return 0;
 }
 
-int netif_set_threaded(struct net_device *dev, bool threaded)
+int netif_set_threaded(struct net_device *dev,
+		       enum netdev_napi_threaded threaded)
 {
 	struct napi_struct *napi;
 	int err = 0;
@@ -7002,7 +7002,7 @@ int netif_set_threaded(struct net_device *dev, bool threaded)
 		if (!napi->thread) {
 			err = napi_kthread_create(napi);
 			if (err) {
-				threaded = false;
+				threaded = NETDEV_NAPI_THREADED_DISABLED;
 				break;
 			}
 		}
@@ -7031,7 +7031,23 @@ int netif_set_threaded(struct net_device *dev, bool threaded)
 
 	return err;
 }
-EXPORT_SYMBOL(netif_set_threaded);
+
+/**
+ * netif_threaded_enable() - enable threaded NAPIs
+ * @dev: net_device instance
+ *
+ * Enable threaded mode for the NAPI instances of the device. This may be useful
+ * for devices where multiple NAPI instances get scheduled by a single
+ * interrupt. Threaded NAPI allows moving the NAPI processing to cores other
+ * than the core where IRQ is mapped.
+ *
+ * This function should be called before @dev is registered.
+ */
+void netif_threaded_enable(struct net_device *dev)
+{
+	WARN_ON_ONCE(netif_set_threaded(dev, NETDEV_NAPI_THREADED_ENABLED));
+}
+EXPORT_SYMBOL(netif_threaded_enable);
 
 /**
  * netif_queue_set_napi - Associate queue with the napi
@@ -7346,7 +7362,7 @@ void netif_napi_add_weight_locked(struct net_device *dev,
 	 * threaded mode will not be enabled in napi_enable().
 	 */
 	if (dev->threaded && napi_kthread_create(napi))
-		dev->threaded = false;
+		dev->threaded = NETDEV_NAPI_THREADED_DISABLED;
 	netif_napi_set_irq_locked(napi, -1);
 }
 EXPORT_SYMBOL(netif_napi_add_weight_locked);
@@ -7515,8 +7531,7 @@ static int __napi_poll(struct napi_struct *n, bool *repoll)
 	}
 
 	/* Flush too old packets. If HZ < 1000, flush all packets */
-	gro_flush(&n->gro, HZ >= 1000);
-	gro_normal_list(&n->gro);
+	gro_flush_normal(&n->gro, HZ >= 1000);
 
 	/* Some drivers may have called napi_schedule
 	 * prior to exhausting their budget.
```
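The gro_flush() + gro_normal_list() pairs above are folded into a single gro_flush_normal() call. Its definition lives outside net/core/dev.c, so it is not shown here; presumably it is a thin wrapper along these lines (a sketch only, with the struct name and header location assumed from the call sites):

```c
/* Presumed shape of the helper used above: flush stale GRO packets,
 * then push the gro_normal batch up the stack. The signature and the
 * include/net/gro.h location are assumptions based on the call sites
 * in net/core/dev.c, not quoted from this commit.
 */
static inline void gro_flush_normal(struct gro_node *gro, bool flush_old)
{
	gro_flush(gro, flush_old);
	gro_normal_list(gro);
}
```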

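Since EXPORT_SYMBOL(netif_set_threaded) is dropped, drivers that want threaded NAPI by default now go through the exported netif_threaded_enable(). A hypothetical usage sketch (the probe function and device are made up; only netif_threaded_enable() and register_netdev() are real kernel APIs):

```c
/* Hypothetical driver probe path. Per the kernel-doc added above,
 * netif_threaded_enable() must run before the device is registered.
 */
static int example_probe(struct net_device *dev)
{
	/* This (made-up) device schedules several NAPI instances from one
	 * interrupt, so run their poll loops in dedicated kthreads. */
	netif_threaded_enable(dev);

	return register_netdev(dev);
}
```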