Diffstat (limited to 'drivers/net/sfc/ethtool.c')
-rw-r--r-- | drivers/net/sfc/ethtool.c | 181
1 file changed, 167 insertions, 14 deletions
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index fd19d6ab97a2..edb9d16b8b47 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -15,6 +15,7 @@
 #include "workarounds.h"
 #include "selftest.h"
 #include "efx.h"
+#include "filter.h"
 #include "nic.h"
 #include "spi.h"
 #include "mdio_10g.h"
@@ -186,8 +187,8 @@ static int efx_ethtool_phys_id(struct net_device *net_dev, u32 count)
 }
 
 /* This must be called with rtnl_lock held. */
-int efx_ethtool_get_settings(struct net_device *net_dev,
-                             struct ethtool_cmd *ecmd)
+static int efx_ethtool_get_settings(struct net_device *net_dev,
+                                    struct ethtool_cmd *ecmd)
 {
         struct efx_nic *efx = netdev_priv(net_dev);
         struct efx_link_state *link_state = &efx->link_state;
@@ -210,8 +211,8 @@ int efx_ethtool_get_settings(struct net_device *net_dev,
 }
 
 /* This must be called with rtnl_lock held. */
-int efx_ethtool_set_settings(struct net_device *net_dev,
-                             struct ethtool_cmd *ecmd)
+static int efx_ethtool_set_settings(struct net_device *net_dev,
+                                    struct ethtool_cmd *ecmd)
 {
         struct efx_nic *efx = netdev_priv(net_dev);
         int rc;
@@ -328,9 +329,10 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
                                   unsigned int test_index,
                                   struct ethtool_string *strings, u64 *data)
 {
+        struct efx_channel *channel = efx_get_channel(efx, 0);
         struct efx_tx_queue *tx_queue;
 
-        efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
+        efx_for_each_channel_tx_queue(tx_queue, channel) {
                 efx_fill_test(test_index++, strings, data,
                               &lb_tests->tx_sent[tx_queue->queue],
                               EFX_TX_QUEUE_NAME(tx_queue),
@@ -550,9 +552,22 @@ static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
 static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data)
 {
         struct efx_nic *efx = netdev_priv(net_dev);
-        u32 supported = efx->type->offload_features & ETH_FLAG_RXHASH;
+        u32 supported = (efx->type->offload_features &
+                         (ETH_FLAG_RXHASH | ETH_FLAG_NTUPLE));
+        int rc;
+
+        rc = ethtool_op_set_flags(net_dev, data, supported);
+        if (rc)
+                return rc;
 
-        return ethtool_op_set_flags(net_dev, data, supported);
+        if (!(data & ETH_FLAG_NTUPLE)) {
+                efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP,
+                                       EFX_FILTER_PRI_MANUAL);
+                efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC,
+                                       EFX_FILTER_PRI_MANUAL);
+        }
+
+        return 0;
 }
 
 static void efx_ethtool_self_test(struct net_device *net_dev,
@@ -673,15 +688,15 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
                                     struct ethtool_coalesce *coalesce)
 {
         struct efx_nic *efx = netdev_priv(net_dev);
-        struct efx_tx_queue *tx_queue;
         struct efx_channel *channel;
 
         memset(coalesce, 0, sizeof(*coalesce));
 
         /* Find lowest IRQ moderation across all used TX queues */
         coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
-        efx_for_each_tx_queue(tx_queue, efx) {
-                channel = tx_queue->channel;
+        efx_for_each_channel(channel, efx) {
+                if (!efx_channel_get_tx_queue(channel, 0))
+                        continue;
                 if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
                         if (channel->channel < efx->n_rx_channels)
                                 coalesce->tx_coalesce_usecs_irq =
@@ -708,7 +723,6 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
 {
         struct efx_nic *efx = netdev_priv(net_dev);
         struct efx_channel *channel;
-        struct efx_tx_queue *tx_queue;
         unsigned tx_usecs, rx_usecs, adaptive;
 
         if (coalesce->use_adaptive_tx_coalesce)
@@ -725,8 +739,9 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
         adaptive = coalesce->use_adaptive_rx_coalesce;
 
         /* If the channel is shared only allow RX parameters to be set */
-        efx_for_each_tx_queue(tx_queue, efx) {
-                if ((tx_queue->channel->channel < efx->n_rx_channels) &&
+        efx_for_each_channel(channel, efx) {
+                if (efx_channel_get_rx_queue(channel) &&
+                    efx_channel_get_tx_queue(channel, 0) &&
                     tx_usecs) {
                         netif_err(efx, drv, efx->net_dev, "Channel is shared. "
                                   "Only RX coalescing may be set\n");
@@ -741,6 +756,42 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
         return 0;
 }
 
+static void efx_ethtool_get_ringparam(struct net_device *net_dev,
+                                      struct ethtool_ringparam *ring)
+{
+        struct efx_nic *efx = netdev_priv(net_dev);
+
+        ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
+        ring->tx_max_pending = EFX_MAX_DMAQ_SIZE;
+        ring->rx_mini_max_pending = 0;
+        ring->rx_jumbo_max_pending = 0;
+        ring->rx_pending = efx->rxq_entries;
+        ring->tx_pending = efx->txq_entries;
+        ring->rx_mini_pending = 0;
+        ring->rx_jumbo_pending = 0;
+}
+
+static int efx_ethtool_set_ringparam(struct net_device *net_dev,
+                                     struct ethtool_ringparam *ring)
+{
+        struct efx_nic *efx = netdev_priv(net_dev);
+
+        if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
+            ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
+            ring->tx_pending > EFX_MAX_DMAQ_SIZE)
+                return -EINVAL;
+
+        if (ring->rx_pending < EFX_MIN_RING_SIZE ||
+            ring->tx_pending < EFX_MIN_RING_SIZE) {
+                netif_err(efx, drv, efx->net_dev,
+                          "TX and RX queues cannot be smaller than %ld\n",
+                          EFX_MIN_RING_SIZE);
+                return -EINVAL;
+        }
+
+        return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending);
+}
+
 static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
                                       struct ethtool_pauseparam *pause)
 {
@@ -840,7 +891,7 @@ static int efx_ethtool_set_wol(struct net_device *net_dev,
         return efx->type->set_wol(efx, wol->wolopts);
 }
 
-extern int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
+static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
 {
         struct efx_nic *efx = netdev_priv(net_dev);
         enum reset_type method;
@@ -918,6 +969,105 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
         }
 }
 
+static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
+                                     struct ethtool_rx_ntuple *ntuple)
+{
+        struct efx_nic *efx = netdev_priv(net_dev);
+        struct ethtool_tcpip4_spec *ip_entry = &ntuple->fs.h_u.tcp_ip4_spec;
+        struct ethtool_tcpip4_spec *ip_mask = &ntuple->fs.m_u.tcp_ip4_spec;
+        struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec;
+        struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec;
+        struct efx_filter_spec filter;
+
+        /* Range-check action */
+        if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR ||
+            ntuple->fs.action >= (s32)efx->n_rx_channels)
+                return -EINVAL;
+
+        if (~ntuple->fs.data_mask)
+                return -EINVAL;
+
+        switch (ntuple->fs.flow_type) {
+        case TCP_V4_FLOW:
+        case UDP_V4_FLOW:
+                /* Must match all of destination, */
+                if (ip_mask->ip4dst | ip_mask->pdst)
+                        return -EINVAL;
+                /* all or none of source, */
+                if ((ip_mask->ip4src | ip_mask->psrc) &&
+                    ((__force u32)~ip_mask->ip4src |
+                     (__force u16)~ip_mask->psrc))
+                        return -EINVAL;
+                /* and nothing else */
+                if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask)
+                        return -EINVAL;
+                break;
+        case ETHER_FLOW:
+                /* Must match all of destination, */
+                if (!is_zero_ether_addr(mac_mask->h_dest))
+                        return -EINVAL;
+                /* all or none of VID, */
+                if (ntuple->fs.vlan_tag_mask != 0xf000 &&
+                    ntuple->fs.vlan_tag_mask != 0xffff)
+                        return -EINVAL;
+                /* and nothing else */
+                if (!is_broadcast_ether_addr(mac_mask->h_source) ||
+                    mac_mask->h_proto != htons(0xffff))
+                        return -EINVAL;
+                break;
+        default:
+                return -EINVAL;
+        }
+
+        filter.priority = EFX_FILTER_PRI_MANUAL;
+        filter.flags = 0;
+
+        switch (ntuple->fs.flow_type) {
+        case TCP_V4_FLOW:
+                if (!ip_mask->ip4src)
+                        efx_filter_set_rx_tcp_full(&filter,
+                                                   htonl(ip_entry->ip4src),
+                                                   htons(ip_entry->psrc),
+                                                   htonl(ip_entry->ip4dst),
+                                                   htons(ip_entry->pdst));
+                else
+                        efx_filter_set_rx_tcp_wild(&filter,
+                                                   htonl(ip_entry->ip4dst),
+                                                   htons(ip_entry->pdst));
+                break;
+        case UDP_V4_FLOW:
+                if (!ip_mask->ip4src)
+                        efx_filter_set_rx_udp_full(&filter,
+                                                   htonl(ip_entry->ip4src),
+                                                   htons(ip_entry->psrc),
+                                                   htonl(ip_entry->ip4dst),
+                                                   htons(ip_entry->pdst));
+                else
+                        efx_filter_set_rx_udp_wild(&filter,
+                                                   htonl(ip_entry->ip4dst),
+                                                   htons(ip_entry->pdst));
+                break;
+        case ETHER_FLOW:
+                if (ntuple->fs.vlan_tag_mask == 0xf000)
+                        efx_filter_set_rx_mac_full(&filter,
+                                                   ntuple->fs.vlan_tag & 0xfff,
+                                                   mac_entry->h_dest);
+                else
+                        efx_filter_set_rx_mac_wild(&filter, mac_entry->h_dest);
+                break;
+        }
+
+        if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR) {
+                return efx_filter_remove_filter(efx, &filter);
+        } else {
+                if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+                        filter.dmaq_id = 0xfff;
+                else
+                        filter.dmaq_id = ntuple->fs.action;
+                return efx_filter_insert_filter(efx, &filter, true);
+        }
+}
+
 static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
                                       struct ethtool_rxfh_indir *indir)
 {
@@ -971,6 +1121,8 @@ const struct ethtool_ops efx_ethtool_ops = {
         .set_eeprom             = efx_ethtool_set_eeprom,
         .get_coalesce           = efx_ethtool_get_coalesce,
         .set_coalesce           = efx_ethtool_set_coalesce,
+        .get_ringparam          = efx_ethtool_get_ringparam,
+        .set_ringparam          = efx_ethtool_set_ringparam,
         .get_pauseparam         = efx_ethtool_get_pauseparam,
         .set_pauseparam         = efx_ethtool_set_pauseparam,
         .get_rx_csum            = efx_ethtool_get_rx_csum,
@@ -994,6 +1146,7 @@ const struct ethtool_ops efx_ethtool_ops = {
         .set_wol                = efx_ethtool_set_wol,
         .reset                  = efx_ethtool_reset,
         .get_rxnfc              = efx_ethtool_get_rxnfc,
+        .set_rx_ntuple          = efx_ethtool_set_rx_ntuple,
         .get_rxfh_indir         = efx_ethtool_get_rxfh_indir,
         .set_rxfh_indir         = efx_ethtool_set_rxfh_indir,
 };
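The new get_ringparam/set_ringparam hooks added above are reached through the standard ETHTOOL_GRINGPARAM and ETHTOOL_SRINGPARAM commands of the SIOCETHTOOL ioctl. The sketch below is a minimal userspace illustration of that path; it is not part of this commit, and the interface name "eth0" and the ring size 1024 are assumptions for the example only.

/* Hypothetical userspace sketch (not from this commit): query and set ring
 * sizes through the ethtool ioctl interface that
 * efx_ethtool_get_ringparam()/efx_ethtool_set_ringparam() now implement.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_ringparam ring = { .cmd = ETHTOOL_GRINGPARAM };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);   /* assumed name */
        ifr.ifr_data = (void *)&ring;

        /* ETHTOOL_GRINGPARAM lands in the driver's get_ringparam hook */
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
                close(fd);
                return 1;
        }
        printf("rx %u/%u tx %u/%u\n", ring.rx_pending, ring.rx_max_pending,
               ring.tx_pending, ring.tx_max_pending);

        /* ETHTOOL_SRINGPARAM lands in the driver's set_ringparam hook;
         * out-of-range sizes are rejected with EINVAL as in the patch above. */
        ring.cmd = ETHTOOL_SRINGPARAM;
        ring.rx_pending = 1024;
        ring.tx_pending = 1024;
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                perror("ETHTOOL_SRINGPARAM");

        close(fd);
        return 0;
}

The ethtool utility drives the same path (for example "ethtool -G <if> rx 1024 tx 1024"); as efx_ethtool_set_ringparam() shows, requested sizes outside the range EFX_MIN_RING_SIZE to EFX_MAX_DMAQ_SIZE are refused with -EINVAL.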