Diffstat (limited to 'drivers/net/ethernet/sfc')
-rw-r--r-- | drivers/net/ethernet/sfc/ef10.c | 549
-rw-r--r-- | drivers/net/ethernet/sfc/efx.c | 201
-rw-r--r-- | drivers/net/ethernet/sfc/efx.h | 16
-rw-r--r-- | drivers/net/ethernet/sfc/enum.h | 1
-rw-r--r-- | drivers/net/ethernet/sfc/ethtool.c | 16
-rw-r--r-- | drivers/net/ethernet/sfc/falcon.c | 38
-rw-r--r-- | drivers/net/ethernet/sfc/farch.c | 48
-rw-r--r-- | drivers/net/ethernet/sfc/filter.h | 17
-rw-r--r-- | drivers/net/ethernet/sfc/mcdi.c | 444
-rw-r--r-- | drivers/net/ethernet/sfc/mcdi.h | 21
-rw-r--r-- | drivers/net/ethernet/sfc/mcdi_mon.c | 76
-rw-r--r-- | drivers/net/ethernet/sfc/mcdi_pcol.h | 733
-rw-r--r-- | drivers/net/ethernet/sfc/mcdi_port.c | 89
-rw-r--r-- | drivers/net/ethernet/sfc/net_driver.h | 75
-rw-r--r-- | drivers/net/ethernet/sfc/nic.c | 12
-rw-r--r-- | drivers/net/ethernet/sfc/nic.h | 34
-rw-r--r-- | drivers/net/ethernet/sfc/ptp.c | 854
-rw-r--r-- | drivers/net/ethernet/sfc/rx.c | 24
-rw-r--r-- | drivers/net/ethernet/sfc/selftest.c | 2
-rw-r--r-- | drivers/net/ethernet/sfc/selftest.h | 1
-rw-r--r-- | drivers/net/ethernet/sfc/siena.c | 119
21 files changed, 2534 insertions, 836 deletions
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 676c3c057bfb..174a92f5fe51 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -14,6 +14,7 @@ #include "mcdi_pcol.h" #include "nic.h" #include "workarounds.h" +#include "selftest.h" #include <linux/in.h> #include <linux/jhash.h> #include <linux/wait.h> @@ -52,31 +53,31 @@ struct efx_ef10_filter_table { struct { unsigned long spec; /* pointer to spec plus flag bits */ -/* BUSY flag indicates that an update is in progress. STACK_OLD is - * used to mark and sweep stack-owned MAC filters. +/* BUSY flag indicates that an update is in progress. AUTO_OLD is + * used to mark and sweep MAC filters for the device address lists. */ #define EFX_EF10_FILTER_FLAG_BUSY 1UL -#define EFX_EF10_FILTER_FLAG_STACK_OLD 2UL +#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2UL #define EFX_EF10_FILTER_FLAGS 3UL u64 handle; /* firmware handle */ } *entry; wait_queue_head_t waitq; /* Shadow of net_device address lists, guarded by mac_lock */ -#define EFX_EF10_FILTER_STACK_UC_MAX 32 -#define EFX_EF10_FILTER_STACK_MC_MAX 256 +#define EFX_EF10_FILTER_DEV_UC_MAX 32 +#define EFX_EF10_FILTER_DEV_MC_MAX 256 struct { u8 addr[ETH_ALEN]; u16 id; - } stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX], - stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX]; - int stack_uc_count; /* negative for PROMISC */ - int stack_mc_count; /* negative for PROMISC/ALLMULTI */ + } dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX], + dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX]; + int dev_uc_count; /* negative for PROMISC */ + int dev_mc_count; /* negative for PROMISC/ALLMULTI */ }; /* An arbitrary search limit for the software hash table */ #define EFX_EF10_FILTER_SEARCH_LIMIT 200 -static void efx_ef10_rx_push_indir_table(struct efx_nic *efx); +static void efx_ef10_rx_push_rss_config(struct efx_nic *efx); static void efx_ef10_rx_free_indir_table(struct efx_nic *efx); static void efx_ef10_filter_table_remove(struct efx_nic *efx); @@ -263,6 +264,8 @@ static int efx_ef10_probe(struct efx_nic *efx) if (rc) goto fail3; + efx_ptp_probe(efx, NULL); + return 0; fail3: @@ -277,11 +280,17 @@ fail1: static int efx_ef10_free_vis(struct efx_nic *efx) { - int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL); + MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0); + size_t outlen; + int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0, + outbuf, sizeof(outbuf), &outlen); /* -EALREADY means nothing to free, so ignore */ if (rc == -EALREADY) rc = 0; + if (rc) + efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen, + rc); return rc; } @@ -465,9 +474,10 @@ static void efx_ef10_remove(struct efx_nic *efx) struct efx_ef10_nic_data *nic_data = efx->nic_data; int rc; + efx_ptp_remove(efx); + efx_mcdi_mon_remove(efx); - /* This needs to be after efx_ptp_remove_channel() with no filters */ efx_ef10_rx_free_indir_table(efx); if (nic_data->wc_membase) @@ -669,10 +679,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx) nic_data->must_restore_piobufs = false; } - efx_ef10_rx_push_indir_table(efx); + efx_ef10_rx_push_rss_config(efx); return 0; } +static void efx_ef10_reset_mc_allocations(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + /* All our allocations have been reset */ + nic_data->must_realloc_vis = true; + nic_data->must_restore_filters = true; + nic_data->must_restore_piobufs = true; + nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; +} + static int efx_ef10_map_reset_flags(u32 *flags) { enum { @@ -703,6 +724,19 @@ static int 
efx_ef10_map_reset_flags(u32 *flags) return -EINVAL; } +static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type) +{ + int rc = efx_mcdi_reset(efx, reset_type); + + /* If it was a port reset, trigger reallocation of MC resources. + * Note that on an MC reset nothing needs to be done now because we'll + * detect the MC reset later and handle it then. + */ + if (reset_type == RESET_TYPE_ALL && !rc) + efx_ef10_reset_mc_allocations(efx); + return rc; +} + #define EF10_DMA_STAT(ext_name, mcdi_name) \ [EF10_STAT_ ## ext_name] = \ { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name } @@ -764,8 +798,8 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS), - EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS), - EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS), + EF10_DMA_STAT(rx_dp_hlb_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS), + EF10_DMA_STAT(rx_dp_hlb_wait, RXDP_EMERGENCY_WAIT_CONDITIONS), }; #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \ @@ -834,8 +868,8 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \ (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \ (1ULL << EF10_STAT_rx_dp_streaming_packets) | \ - (1ULL << EF10_STAT_rx_dp_emerg_fetch) | \ - (1ULL << EF10_STAT_rx_dp_emerg_wait)) + (1ULL << EF10_STAT_rx_dp_hlb_fetch) | \ + (1ULL << EF10_STAT_rx_dp_hlb_wait)) static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) { @@ -901,6 +935,7 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) return -EAGAIN; /* Update derived statistics */ + efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]); stats[EF10_STAT_rx_good_bytes] = stats[EF10_STAT_rx_bytes] - stats[EF10_STAT_rx_bytes_minus_good_bytes]; @@ -1067,10 +1102,7 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) nic_data->warm_boot_count = rc; /* All our allocations have been reset */ - nic_data->must_realloc_vis = true; - nic_data->must_restore_filters = true; - nic_data->must_restore_piobufs = true; - nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; + efx_ef10_reset_mc_allocations(efx); /* The datapath firmware might have been changed */ nic_data->must_check_datapath_caps = true; @@ -1241,8 +1273,8 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) return; fail: - WARN_ON(true); - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n", + tx_queue->queue); } static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue) @@ -1256,7 +1288,7 @@ static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue) MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE, tx_queue->queue); - rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf), + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); if (rc && rc != -EALREADY) @@ -1265,7 +1297,8 @@ static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue) return; fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN, + outbuf, outlen, rc); } static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue) @@ -1408,12 +1441,12 @@ static void efx_ef10_rx_free_indir_table(struct efx_nic *efx) nic_data->rx_rss_context 
= EFX_EF10_RSS_CONTEXT_INVALID; } -static void efx_ef10_rx_push_indir_table(struct efx_nic *efx) +static void efx_ef10_rx_push_rss_config(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; int rc; - netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n"); + netif_dbg(efx, drv, efx->net_dev, "pushing RSS config\n"); if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) { rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context); @@ -1461,8 +1494,9 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue) MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue)); MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE, efx_rx_queue_index(rx_queue)); - MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS, - INIT_RXQ_IN_FLAG_PREFIX, 1); + MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS, + INIT_RXQ_IN_FLAG_PREFIX, 1, + INIT_RXQ_IN_FLAG_TIMESTAMP, 1); MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0); MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); @@ -1481,13 +1515,8 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue) rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen, outbuf, sizeof(outbuf), &outlen); if (rc) - goto fail; - - return; - -fail: - WARN_ON(true); - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n", + efx_rx_queue_index(rx_queue)); } static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue) @@ -1501,7 +1530,7 @@ static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue) MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE, efx_rx_queue_index(rx_queue)); - rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf), + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); if (rc && rc != -EALREADY) @@ -1510,7 +1539,8 @@ static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue) return; fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN, + outbuf, outlen, rc); } static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue) @@ -1647,15 +1677,7 @@ static int efx_ef10_ev_init(struct efx_channel *channel) rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen, outbuf, sizeof(outbuf), &outlen); - if (rc) - goto fail; - /* IRQ return is ignored */ - - return 0; - -fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -1669,7 +1691,7 @@ static void efx_ef10_ev_fini(struct efx_channel *channel) MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel); - rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); if (rc && rc != -EALREADY) @@ -1678,7 +1700,8 @@ static void efx_ef10_ev_fini(struct efx_channel *channel) return; fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN, + outbuf, outlen, rc); } static void efx_ef10_ev_remove(struct efx_channel *channel) @@ -1717,8 +1740,6 @@ static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue) { unsigned int rx_desc_ptr; - WARN_ON(rx_queue->scatter_n == 0); - netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev, "scattered RX aborted (dropping %u buffers)\n", rx_queue->scatter_n); @@ -1754,7 +1775,10 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel, rx_l4_class = EFX_QWORD_FIELD(*event, 
ESF_DZ_RX_L4_CLASS); rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT); - WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT)); + if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT)) + netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); rx_queue = efx_channel_get_rx_queue(channel); @@ -1765,17 +1789,27 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel, ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); if (n_descs != rx_queue->scatter_n + 1) { + struct efx_ef10_nic_data *nic_data = efx->nic_data; + /* detect rx abort */ if (unlikely(n_descs == rx_queue->scatter_n)) { - WARN_ON(rx_bytes != 0); + if (rx_queue->scatter_n == 0 || rx_bytes != 0) + netdev_WARN(efx->net_dev, + "invalid RX abort: scatter_n=%u event=" + EFX_QWORD_FMT "\n", + rx_queue->scatter_n, + EFX_QWORD_VAL(*event)); efx_ef10_handle_rx_abort(rx_queue); return 0; } - if (unlikely(rx_queue->scatter_n != 0)) { - /* Scattered packet completions cannot be - * merged, so something has gone wrong. - */ + /* Check that RX completion merging is valid, i.e. + * the current firmware supports it and this is a + * non-scattered packet. + */ + if (!(nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) || + rx_queue->scatter_n != 0 || rx_cont) { efx_ef10_handle_rx_bad_lbits( rx_queue, next_ptr_lbits, (rx_queue->removed_count + @@ -1901,7 +1935,7 @@ static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel, * events, so efx_process_channel() won't refill the * queue. Refill it here */ - efx_fast_push_rx_descriptors(&channel->rx_queue); + efx_fast_push_rx_descriptors(&channel->rx_queue, true); break; default: netif_err(efx, hw, efx->net_dev, @@ -2232,7 +2266,9 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx, MC_CMD_FILTER_OP_IN_RX_DEST_HOST); MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST, MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT); - MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id); + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, + spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? + 0 : spec->dmaq_id); MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE, (spec->flags & EFX_FILTER_FLAG_RX_RSS) ? 
MC_CMD_FILTER_OP_IN_RX_MODE_RSS : @@ -2257,6 +2293,8 @@ static int efx_ef10_filter_push(struct efx_nic *efx, outbuf, sizeof(outbuf), NULL); if (rc == 0) *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); + if (rc == -ENOSPC) + rc = -EBUSY; /* to match efx_farch_filter_insert() */ return rc; } @@ -2326,10 +2364,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx, EFX_EF10_FILTER_FLAG_BUSY) break; if (spec->priority < saved_spec->priority && - !(saved_spec->priority == - EFX_FILTER_PRI_REQUIRED && - saved_spec->flags & - EFX_FILTER_FLAG_RX_STACK)) { + spec->priority != EFX_FILTER_PRI_AUTO) { rc = -EPERM; goto out_unlock; } @@ -2383,11 +2418,13 @@ found: */ saved_spec = efx_ef10_filter_entry_spec(table, ins_index); if (saved_spec) { - if (spec->flags & EFX_FILTER_FLAG_RX_STACK) { + if (spec->priority == EFX_FILTER_PRI_AUTO && + saved_spec->priority >= EFX_FILTER_PRI_AUTO) { /* Just make sure it won't be removed */ - saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK; + if (saved_spec->priority > EFX_FILTER_PRI_AUTO) + saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO; table->entry[ins_index].spec &= - ~EFX_EF10_FILTER_FLAG_STACK_OLD; + ~EFX_EF10_FILTER_FLAG_AUTO_OLD; rc = ins_index; goto out_unlock; } @@ -2427,8 +2464,11 @@ found: if (rc == 0) { if (replacing) { /* Update the fields that may differ */ + if (saved_spec->priority == EFX_FILTER_PRI_AUTO) + saved_spec->flags |= + EFX_FILTER_FLAG_RX_OVER_AUTO; saved_spec->priority = spec->priority; - saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK; + saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO; saved_spec->flags |= spec->flags; saved_spec->rss_context = spec->rss_context; saved_spec->dmaq_id = spec->dmaq_id; @@ -2497,13 +2537,13 @@ static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx) } /* Remove a filter. - * If !stack_requested, remove by ID - * If stack_requested, remove by index + * If !by_index, remove by ID + * If by_index, remove by index * Filter ID may come from userland and must be range-checked. 
*/ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, - enum efx_filter_priority priority, - u32 filter_id, bool stack_requested) + unsigned int priority_mask, + u32 filter_id, bool by_index) { unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; struct efx_ef10_filter_table *table = efx->filter_state; @@ -2527,26 +2567,41 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, spin_unlock_bh(&efx->filter_lock); schedule(); } + spec = efx_ef10_filter_entry_spec(table, filter_idx); - if (!spec || spec->priority > priority || - (!stack_requested && + if (!spec || + (!by_index && efx_ef10_filter_rx_match_pri(table, spec->match_flags) != filter_id / HUNT_FILTER_TBL_ROWS)) { rc = -ENOENT; goto out_unlock; } + + if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO && + priority_mask == (1U << EFX_FILTER_PRI_AUTO)) { + /* Just remove flags */ + spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO; + table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD; + rc = 0; + goto out_unlock; + } + + if (!(priority_mask & (1U << spec->priority))) { + rc = -ENOENT; + goto out_unlock; + } + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; spin_unlock_bh(&efx->filter_lock); - if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) { - /* Reset steering of a stack-owned filter */ + if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) { + /* Reset to an automatic filter */ struct efx_filter_spec new_spec = *spec; - new_spec.priority = EFX_FILTER_PRI_REQUIRED; + new_spec.priority = EFX_FILTER_PRI_AUTO; new_spec.flags = (EFX_FILTER_FLAG_RX | - EFX_FILTER_FLAG_RX_RSS | - EFX_FILTER_FLAG_RX_STACK); + EFX_FILTER_FLAG_RX_RSS); new_spec.dmaq_id = 0; new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; rc = efx_ef10_filter_push(efx, &new_spec, @@ -2574,6 +2629,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); } } + table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; wake_up_all(&table->waitq); out_unlock: @@ -2586,7 +2642,8 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx, enum efx_filter_priority priority, u32 filter_id) { - return efx_ef10_filter_remove_internal(efx, priority, filter_id, false); + return efx_ef10_filter_remove_internal(efx, 1U << priority, + filter_id, false); } static int efx_ef10_filter_get_safe(struct efx_nic *efx, @@ -2612,10 +2669,24 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx, return rc; } -static void efx_ef10_filter_clear_rx(struct efx_nic *efx, +static int efx_ef10_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority) { - /* TODO */ + unsigned int priority_mask; + unsigned int i; + int rc; + + priority_mask = (((1U << (priority + 1)) - 1) & + ~(1U << EFX_FILTER_PRI_AUTO)); + + for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { + rc = efx_ef10_filter_remove_internal(efx, priority_mask, + i, true); + if (rc && rc != -ENOENT) + return rc; + } + + return 0; } static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx, @@ -2716,8 +2787,6 @@ static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx, rc = -EBUSY; goto fail_unlock; } - EFX_WARN_ON_PARANOID(saved_spec->flags & - EFX_FILTER_FLAG_RX_STACK); if (spec->priority < saved_spec->priority) { rc = -EPERM; goto fail_unlock; @@ -3027,8 +3096,11 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx) table->entry[filter_idx].handle); rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), NULL, 0, NULL); - - WARN_ON(rc != 0); + if (rc) + netdev_WARN(efx->net_dev, + 
"filter_idx=%#x handle=%#llx\n", + filter_idx, + table->entry[filter_idx].handle); kfree(spec); } @@ -3052,15 +3124,15 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) /* Mark old filters that may need to be removed */ spin_lock_bh(&efx->filter_lock); - n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count; + n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count; for (i = 0; i < n; i++) { - filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS; - table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD; + filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS; + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; } - n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count; + n = table->dev_mc_count < 0 ? 1 : table->dev_mc_count; for (i = 0; i < n; i++) { - filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS; - table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD; + filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS; + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; } spin_unlock_bh(&efx->filter_lock); @@ -3069,28 +3141,28 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) */ netif_addr_lock_bh(net_dev); if (net_dev->flags & IFF_PROMISC || - netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) { - table->stack_uc_count = -1; + netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) { + table->dev_uc_count = -1; } else { - table->stack_uc_count = 1 + netdev_uc_count(net_dev); - memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr, + table->dev_uc_count = 1 + netdev_uc_count(net_dev); + memcpy(table->dev_uc_list[0].addr, net_dev->dev_addr, ETH_ALEN); i = 1; netdev_for_each_uc_addr(uc, net_dev) { - memcpy(table->stack_uc_list[i].addr, + memcpy(table->dev_uc_list[i].addr, uc->addr, ETH_ALEN); i++; } } if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) || - netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) { - table->stack_mc_count = -1; + netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) { + table->dev_mc_count = -1; } else { - table->stack_mc_count = 1 + netdev_mc_count(net_dev); - eth_broadcast_addr(table->stack_mc_list[0].addr); + table->dev_mc_count = 1 + netdev_mc_count(net_dev); + eth_broadcast_addr(table->dev_mc_list[0].addr); i = 1; netdev_for_each_mc_addr(mc, net_dev) { - memcpy(table->stack_mc_list[i].addr, + memcpy(table->dev_mc_list[i].addr, mc->addr, ETH_ALEN); i++; } @@ -3098,90 +3170,86 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) netif_addr_unlock_bh(net_dev); /* Insert/renew unicast filters */ - if (table->stack_uc_count >= 0) { - for (i = 0; i < table->stack_uc_count; i++) { - efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, - EFX_FILTER_FLAG_RX_RSS | - EFX_FILTER_FLAG_RX_STACK, + if (table->dev_uc_count >= 0) { + for (i = 0; i < table->dev_uc_count; i++) { + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, + EFX_FILTER_FLAG_RX_RSS, 0); efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, - table->stack_uc_list[i].addr); + table->dev_uc_list[i].addr); rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { /* Fall back to unicast-promisc */ while (i--) efx_ef10_filter_remove_safe( - efx, EFX_FILTER_PRI_REQUIRED, - table->stack_uc_list[i].id); - table->stack_uc_count = -1; + efx, EFX_FILTER_PRI_AUTO, + table->dev_uc_list[i].id); + table->dev_uc_count = -1; break; } - table->stack_uc_list[i].id = rc; + table->dev_uc_list[i].id = rc; } } - if (table->stack_uc_count < 0) { - efx_filter_init_rx(&spec, 
EFX_FILTER_PRI_REQUIRED, - EFX_FILTER_FLAG_RX_RSS | - EFX_FILTER_FLAG_RX_STACK, + if (table->dev_uc_count < 0) { + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, + EFX_FILTER_FLAG_RX_RSS, 0); efx_filter_set_uc_def(&spec); rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { WARN_ON(1); - table->stack_uc_count = 0; + table->dev_uc_count = 0; } else { - table->stack_uc_list[0].id = rc; + table->dev_uc_list[0].id = rc; } } /* Insert/renew multicast filters */ - if (table->stack_mc_count >= 0) { - for (i = 0; i < table->stack_mc_count; i++) { - efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, - EFX_FILTER_FLAG_RX_RSS | - EFX_FILTER_FLAG_RX_STACK, + if (table->dev_mc_count >= 0) { + for (i = 0; i < table->dev_mc_count; i++) { + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, + EFX_FILTER_FLAG_RX_RSS, 0); efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, - table->stack_mc_list[i].addr); + table->dev_mc_list[i].addr); rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { /* Fall back to multicast-promisc */ while (i--) efx_ef10_filter_remove_safe( - efx, EFX_FILTER_PRI_REQUIRED, - table->stack_mc_list[i].id); - table->stack_mc_count = -1; + efx, EFX_FILTER_PRI_AUTO, + table->dev_mc_list[i].id); + table->dev_mc_count = -1; break; } - table->stack_mc_list[i].id = rc; + table->dev_mc_list[i].id = rc; } } - if (table->stack_mc_count < 0) { - efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, - EFX_FILTER_FLAG_RX_RSS | - EFX_FILTER_FLAG_RX_STACK, + if (table->dev_mc_count < 0) { + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, + EFX_FILTER_FLAG_RX_RSS, 0); efx_filter_set_mc_def(&spec); rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { WARN_ON(1); - table->stack_mc_count = 0; + table->dev_mc_count = 0; } else { - table->stack_mc_list[0].id = rc; + table->dev_mc_list[0].id = rc; } } /* Remove filters that weren't renewed. Since nothing else - * changes the STACK_OLD flag or removes these filters, we + * changes the AUTO_OLD flag or removes these filters, we * don't need to hold the filter_lock while scanning for * these filters. */ for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { if (ACCESS_ONCE(table->entry[i].spec) & - EFX_EF10_FILTER_FLAG_STACK_OLD) { - if (efx_ef10_filter_remove_internal(efx, - EFX_FILTER_PRI_REQUIRED, - i, true) < 0) + EFX_EF10_FILTER_FLAG_AUTO_OLD) { + if (efx_ef10_filter_remove_internal( + efx, 1U << EFX_FILTER_PRI_AUTO, + i, true) < 0) remove_failed = true; } } @@ -3195,6 +3263,87 @@ static int efx_ef10_mac_reconfigure(struct efx_nic *efx) return efx_mcdi_set_mac(efx); } +static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); + + MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type); + return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +/* MC BISTs follow a different poll mechanism to phy BISTs. + * The BIST is done in the poll handler on the MC, and the MCDI command + * will block until the BIST is done. 
+ */ +static int efx_ef10_poll_bist(struct efx_nic *efx) +{ + int rc; + MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN); + size_t outlen; + u32 result; + + rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc != 0) + return rc; + + if (outlen < MC_CMD_POLL_BIST_OUT_LEN) + return -EIO; + + result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT); + switch (result) { + case MC_CMD_POLL_BIST_PASSED: + netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n"); + return 0; + case MC_CMD_POLL_BIST_TIMEOUT: + netif_err(efx, hw, efx->net_dev, "BIST timed out\n"); + return -EIO; + case MC_CMD_POLL_BIST_FAILED: + netif_err(efx, hw, efx->net_dev, "BIST failed.\n"); + return -EIO; + default: + netif_err(efx, hw, efx->net_dev, + "BIST returned unknown result %u", result); + return -EIO; + } +} + +static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type) +{ + int rc; + + netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type); + + rc = efx_ef10_start_bist(efx, bist_type); + if (rc != 0) + return rc; + + return efx_ef10_poll_bist(efx); +} + +static int +efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) +{ + int rc, rc2; + + efx_reset_down(efx, RESET_TYPE_WORLD); + + rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST, + NULL, 0, NULL, 0, NULL); + if (rc != 0) + goto out; + + tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1; + tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1; + + rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD); + +out: + rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0); + return rc ? rc : rc2; +} + #ifdef CONFIG_SFC_MTD struct efx_ef10_nvram_type_info { @@ -3213,6 +3362,7 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" }, { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" }, { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" }, + { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" }, { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" }, }; @@ -3320,6 +3470,119 @@ static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time) _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD); } +static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel, + bool temp) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN); + int rc; + + if (channel->sync_events_state == SYNC_EVENTS_REQUESTED || + channel->sync_events_state == SYNC_EVENTS_VALID || + (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED)) + return 0; + channel->sync_events_state = SYNC_EVENTS_REQUESTED; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE, + channel->channel); + + rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, + inbuf, sizeof(inbuf), NULL, 0, NULL); + + if (rc != 0) + channel->sync_events_state = temp ? 
SYNC_EVENTS_QUIESCENT : + SYNC_EVENTS_DISABLED; + + return rc; +} + +static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel, + bool temp) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN); + int rc; + + if (channel->sync_events_state == SYNC_EVENTS_DISABLED || + (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT)) + return 0; + if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) { + channel->sync_events_state = SYNC_EVENTS_DISABLED; + return 0; + } + channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT : + SYNC_EVENTS_DISABLED; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL, + MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE); + MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE, + channel->channel); + + rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, + inbuf, sizeof(inbuf), NULL, 0, NULL); + + return rc; +} + +static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en, + bool temp) +{ + int (*set)(struct efx_channel *channel, bool temp); + struct efx_channel *channel; + + set = en ? + efx_ef10_rx_enable_timestamping : + efx_ef10_rx_disable_timestamping; + + efx_for_each_channel(channel, efx) { + int rc = set(channel, temp); + if (en && rc != 0) { + efx_ef10_ptp_set_ts_sync_events(efx, false, temp); + return rc; + } + } + + return 0; +} + +static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx, + struct hwtstamp_config *init) +{ + int rc; + + switch (init->rx_filter) { + case HWTSTAMP_FILTER_NONE: + efx_ef10_ptp_set_ts_sync_events(efx, false, false); + /* if TX timestamping is still requested then leave PTP on */ + return efx_ptp_change_mode(efx, + init->tx_type != HWTSTAMP_TX_OFF, 0); + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + init->rx_filter = HWTSTAMP_FILTER_ALL; + rc = efx_ptp_change_mode(efx, true, 0); + if (!rc) + rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false); + if (rc) + efx_ptp_change_mode(efx, false, 0); + return rc; + default: + return -ERANGE; + } +} + const struct efx_nic_type efx_hunt_a0_nic_type = { .mem_map_size = efx_ef10_mem_map_size, .probe = efx_ef10_probe, @@ -3329,13 +3592,14 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .fini = efx_port_dummy_op_void, .map_reset_reason = efx_mcdi_map_reset_reason, .map_reset_flags = efx_ef10_map_reset_flags, - .reset = efx_mcdi_reset, + .reset = efx_ef10_reset, .probe_port = efx_mcdi_port_probe, .remove_port = efx_mcdi_port_remove, .fini_dmaq = efx_ef10_fini_dmaq, .describe_stats = efx_ef10_describe_stats, .update_stats = efx_ef10_update_stats, .start_stats = efx_mcdi_mac_start_stats, + .pull_stats = efx_mcdi_mac_pull_stats, .stop_stats = efx_mcdi_mac_stop_stats, .set_id_led = efx_mcdi_set_id_led, .push_irq_moderation = efx_ef10_push_irq_moderation, @@ -3345,7 +3609,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .get_wol = efx_ef10_get_wol, .set_wol = efx_ef10_set_wol, .resume_wol = efx_port_dummy_op_void, - /* TODO: test_chip */ + .test_chip 
= efx_ef10_test_chip, .test_nvram = efx_mcdi_nvram_test_all, .mcdi_request = efx_ef10_mcdi_request, .mcdi_poll_response = efx_ef10_mcdi_poll_response, @@ -3360,7 +3624,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .tx_init = efx_ef10_tx_init, .tx_remove = efx_ef10_tx_remove, .tx_write = efx_ef10_tx_write, - .rx_push_indir_table = efx_ef10_rx_push_indir_table, + .rx_push_rss_config = efx_ef10_rx_push_rss_config, .rx_probe = efx_ef10_rx_probe, .rx_init = efx_ef10_rx_init, .rx_remove = efx_ef10_rx_remove, @@ -3397,11 +3661,14 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .mtd_sync = efx_mcdi_mtd_sync, #endif .ptp_write_host_time = efx_ef10_ptp_write_host_time, + .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, + .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, .revision = EFX_REV_HUNT_A0, .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, + .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, .can_rx_scatter = true, .always_rx_scatter = true, .max_interrupt_mode = EFX_INT_MODE_MSIX, @@ -3410,4 +3677,6 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { NETIF_F_RXHASH | NETIF_F_NTUPLE), .mcdi_max_ver = 2, .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, + .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | + 1 << HWTSTAMP_FILTER_ALL, }; diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index fd844b53e385..83d464347021 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -83,6 +83,7 @@ const char *const efx_reset_type_names[] = { [RESET_TYPE_DMA_ERROR] = "DMA_ERROR", [RESET_TYPE_TX_SKIP] = "TX_SKIP", [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", + [RESET_TYPE_MC_BIST] = "MC_BIST", }; /* Reset workqueue. If any NIC has a hardware failure then a reset will be @@ -91,6 +92,12 @@ const char *const efx_reset_type_names[] = { */ static struct workqueue_struct *reset_workqueue; +/* How often and how many times to poll for a reset while waiting for a + * BIST that another function started to complete. + */ +#define BIST_WAIT_DELAY_MS 100 +#define BIST_WAIT_DELAY_COUNT 100 + /************************************************************************** * * Configurable values @@ -246,7 +253,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget) efx_channel_get_rx_queue(channel); efx_rx_flush_packet(channel); - efx_fast_push_rx_descriptors(rx_queue); + efx_fast_push_rx_descriptors(rx_queue, true); } return spent; @@ -639,7 +646,9 @@ static void efx_start_datapath(struct efx_nic *efx) efx_for_each_channel_rx_queue(rx_queue, channel) { efx_init_rx_queue(rx_queue); atomic_inc(&efx->active_queues); - efx_nic_generate_fill_event(rx_queue); + efx_stop_eventq(channel); + efx_fast_push_rx_descriptors(rx_queue, false); + efx_start_eventq(channel); } WARN_ON(channel->rx_pkt_n_frags); @@ -1051,18 +1060,23 @@ static void efx_start_port(struct efx_nic *efx) mutex_lock(&efx->mac_lock); efx->port_enabled = true; - /* efx_mac_work() might have been scheduled after efx_stop_port(), - * and then cancelled by efx_flush_all() */ + /* Ensure MAC ingress/egress is enabled */ efx->type->reconfigure_mac(efx); mutex_unlock(&efx->mac_lock); } -/* Prevent efx_mac_work() and efx_monitor() from working */ +/* Cancel work for MAC reconfiguration, periodic hardware monitoring + * and the async self-test, wait for them to finish and prevent them + * being scheduled again. 
This doesn't cover online resets, which + * should only be cancelled when removing the device. + */ static void efx_stop_port(struct efx_nic *efx) { netif_dbg(efx, ifdown, efx->net_dev, "stop port\n"); + EFX_ASSERT_RESET_SERIALISED(efx); + mutex_lock(&efx->mac_lock); efx->port_enabled = false; mutex_unlock(&efx->mac_lock); @@ -1070,6 +1084,10 @@ static void efx_stop_port(struct efx_nic *efx) /* Serialise against efx_set_multicast_list() */ netif_addr_lock_bh(efx->net_dev); netif_addr_unlock_bh(efx->net_dev); + + cancel_delayed_work_sync(&efx->monitor_work); + efx_selftest_async_cancel(efx); + cancel_work_sync(&efx->mac_work); } static void efx_fini_port(struct efx_nic *efx) @@ -1099,6 +1117,77 @@ static void efx_remove_port(struct efx_nic *efx) * **************************************************************************/ +static LIST_HEAD(efx_primary_list); +static LIST_HEAD(efx_unassociated_list); + +static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right) +{ + return left->type == right->type && + left->vpd_sn && right->vpd_sn && + !strcmp(left->vpd_sn, right->vpd_sn); +} + +static void efx_associate(struct efx_nic *efx) +{ + struct efx_nic *other, *next; + + if (efx->primary == efx) { + /* Adding primary function; look for secondaries */ + + netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n"); + list_add_tail(&efx->node, &efx_primary_list); + + list_for_each_entry_safe(other, next, &efx_unassociated_list, + node) { + if (efx_same_controller(efx, other)) { + list_del(&other->node); + netif_dbg(other, probe, other->net_dev, + "moving to secondary list of %s %s\n", + pci_name(efx->pci_dev), + efx->net_dev->name); + list_add_tail(&other->node, + &efx->secondary_list); + other->primary = efx; + } + } + } else { + /* Adding secondary function; look for primary */ + + list_for_each_entry(other, &efx_primary_list, node) { + if (efx_same_controller(efx, other)) { + netif_dbg(efx, probe, efx->net_dev, + "adding to secondary list of %s %s\n", + pci_name(other->pci_dev), + other->net_dev->name); + list_add_tail(&efx->node, + &other->secondary_list); + efx->primary = other; + return; + } + } + + netif_dbg(efx, probe, efx->net_dev, + "adding to unassociated list\n"); + list_add_tail(&efx->node, &efx_unassociated_list); + } +} + +static void efx_dissociate(struct efx_nic *efx) +{ + struct efx_nic *other, *next; + + list_del(&efx->node); + efx->primary = NULL; + + list_for_each_entry_safe(other, next, &efx->secondary_list, node) { + list_del(&other->node); + netif_dbg(other, probe, other->net_dev, + "moving to unassociated list\n"); + list_add_tail(&other->node, &efx_unassociated_list); + other->primary = NULL; + } +} + /* This configures the PCI device to enable I/O and DMA. */ static int efx_init_io(struct efx_nic *efx) { @@ -1675,18 +1764,10 @@ static void efx_start_all(struct efx_nic *efx) } efx->type->start_stats(efx); -} - -/* Flush all delayed work. Should only be called when no more delayed work - * will be scheduled. This doesn't flush pending online resets (efx_reset), - * since we're holding the rtnl_lock at this point. 
*/ -static void efx_flush_all(struct efx_nic *efx) -{ - /* Make sure the hardware monitor and event self-test are stopped */ - cancel_delayed_work_sync(&efx->monitor_work); - efx_selftest_async_cancel(efx); - /* Stop scheduled port reconfigurations */ - cancel_work_sync(&efx->mac_work); + efx->type->pull_stats(efx); + spin_lock_bh(&efx->stats_lock); + efx->type->update_stats(efx, NULL, NULL); + spin_unlock_bh(&efx->stats_lock); } /* Quiesce the hardware and software data path, and regular activity @@ -1702,12 +1783,16 @@ static void efx_stop_all(struct efx_nic *efx) if (!efx->port_enabled) return; + /* update stats before we go down so we can accurately count + * rx_nodesc_drops + */ + efx->type->pull_stats(efx); + spin_lock_bh(&efx->stats_lock); + efx->type->update_stats(efx, NULL, NULL); + spin_unlock_bh(&efx->stats_lock); efx->type->stop_stats(efx); efx_stop_port(efx); - /* Flush efx_mac_work(), refill_workqueue, monitor_work */ - efx_flush_all(efx); - /* Stop the kernel transmit interface. This is only valid if * the device is stopped or detached; otherwise the watchdog * may fire immediately. @@ -1851,7 +1936,9 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) struct mii_ioctl_data *data = if_mii(ifr); if (cmd == SIOCSHWTSTAMP) - return efx_ptp_ioctl(efx, ifr, cmd); + return efx_ptp_set_ts_config(efx, ifr); + if (cmd == SIOCGHWTSTAMP) + return efx_ptp_get_ts_config(efx, ifr); /* Convert phy_id from older PRTAD/DEVAD format */ if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && @@ -2064,7 +2151,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data) /* If disabling RX n-tuple filtering, clear existing filters */ if (net_dev->features & ~data & NETIF_F_NTUPLE) - efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); + return efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); return 0; } @@ -2198,6 +2285,8 @@ static int efx_register_netdev(struct efx_nic *efx) efx_init_tx_queue_core_txq(tx_queue); } + efx_associate(efx); + rtnl_unlock(); rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); @@ -2211,6 +2300,7 @@ static int efx_register_netdev(struct efx_nic *efx) fail_registered: rtnl_lock(); + efx_dissociate(efx); unregister_netdevice(net_dev); fail_locked: efx->state = STATE_UNINIT; @@ -2387,6 +2477,24 @@ int efx_try_recovery(struct efx_nic *efx) return 0; } +static void efx_wait_for_bist_end(struct efx_nic *efx) +{ + int i; + + for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) { + if (efx_mcdi_poll_reboot(efx)) + goto out; + msleep(BIST_WAIT_DELAY_MS); + } + + netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n"); +out: + /* Either way unset the BIST flag. If we found no reboot we probably + * won't recover, but we should try. + */ + efx->mc_bist_for_other_fn = false; +} + /* The worker thread exists so that code that cannot sleep can * schedule a reset for later. 
*/ @@ -2399,6 +2507,9 @@ static void efx_reset_work(struct work_struct *data) pending = ACCESS_ONCE(efx->reset_pending); method = fls(pending) - 1; + if (method == RESET_TYPE_MC_BIST) + efx_wait_for_bist_end(efx); + if ((method == RESET_TYPE_RECOVER_OR_DISABLE || method == RESET_TYPE_RECOVER_OR_ALL) && efx_try_recovery(efx)) @@ -2437,6 +2548,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) case RESET_TYPE_WORLD: case RESET_TYPE_DISABLE: case RESET_TYPE_RECOVER_OR_DISABLE: + case RESET_TYPE_MC_BIST: method = type; netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", RESET_TYPE(method)); @@ -2530,6 +2642,8 @@ static int efx_init_struct(struct efx_nic *efx, int i; /* Initialise common structures */ + INIT_LIST_HEAD(&efx->node); + INIT_LIST_HEAD(&efx->secondary_list); spin_lock_init(&efx->biu_lock); #ifdef CONFIG_SFC_MTD INIT_LIST_HEAD(&efx->mtd_list); @@ -2548,6 +2662,8 @@ static int efx_init_struct(struct efx_nic *efx, NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0; efx->rx_packet_hash_offset = efx->type->rx_hash_offset - efx->type->rx_prefix_size; + efx->rx_packet_ts_offset = + efx->type->rx_ts_offset - efx->type->rx_prefix_size; spin_lock_init(&efx->stats_lock); mutex_init(&efx->mac_lock); efx->phy_op = &efx_dummy_phy_operations; @@ -2588,6 +2704,8 @@ static void efx_fini_struct(struct efx_nic *efx) for (i = 0; i < EFX_MAX_CHANNELS; i++) kfree(efx->channel[i]); + kfree(efx->vpd_sn); + if (efx->workqueue) { destroy_workqueue(efx->workqueue); efx->workqueue = NULL; @@ -2632,6 +2750,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev) /* Mark the NIC as fini, then stop the interface */ rtnl_lock(); + efx_dissociate(efx); dev_close(efx->net_dev); efx_disable_interrupts(efx); rtnl_unlock(); @@ -2647,7 +2766,6 @@ static void efx_pci_remove(struct pci_dev *pci_dev) netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); efx_fini_struct(efx); - pci_set_drvdata(pci_dev, NULL); free_netdev(efx->net_dev); pci_disable_pcie_error_reporting(pci_dev); @@ -2659,12 +2777,12 @@ static void efx_pci_remove(struct pci_dev *pci_dev) * always appear within the first 512 bytes. 
*/ #define SFC_VPD_LEN 512 -static void efx_print_product_vpd(struct efx_nic *efx) +static void efx_probe_vpd_strings(struct efx_nic *efx) { struct pci_dev *dev = efx->pci_dev; char vpd_data[SFC_VPD_LEN]; ssize_t vpd_size; - int i, j; + int ro_start, ro_size, i, j; /* Get the vpd data from the device */ vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data); @@ -2674,14 +2792,15 @@ static void efx_print_product_vpd(struct efx_nic *efx) } /* Get the Read only section */ - i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); - if (i < 0) { + ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); + if (ro_start < 0) { netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n"); return; } - j = pci_vpd_lrdt_size(&vpd_data[i]); - i += PCI_VPD_LRDT_TAG_SIZE; + ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]); + j = ro_size; + i = ro_start + PCI_VPD_LRDT_TAG_SIZE; if (i + j > vpd_size) j = vpd_size - i; @@ -2701,6 +2820,27 @@ static void efx_print_product_vpd(struct efx_nic *efx) netif_info(efx, drv, efx->net_dev, "Part Number : %.*s\n", j, &vpd_data[i]); + + i = ro_start + PCI_VPD_LRDT_TAG_SIZE; + j = ro_size; + i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN"); + if (i < 0) { + netif_err(efx, drv, efx->net_dev, "Serial number not found\n"); + return; + } + + j = pci_vpd_info_field_size(&vpd_data[i]); + i += PCI_VPD_INFO_FLD_HDR_SIZE; + if (i + j > vpd_size) { + netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n"); + return; + } + + efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL); + if (!efx->vpd_sn) + return; + + snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]); } @@ -2797,7 +2937,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev, netif_info(efx, probe, efx->net_dev, "Solarflare NIC detected\n"); - efx_print_product_vpd(efx); + efx_probe_vpd_strings(efx); /* Set up basic I/O (BAR mappings etc) */ rc = efx_init_io(efx); @@ -2841,7 +2981,6 @@ static int efx_pci_probe(struct pci_dev *pci_dev, fail2: efx_fini_struct(efx); fail1: - pci_set_drvdata(pci_dev, NULL); WARN_ON(rc > 0); netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc); free_netdev(net_dev); diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index b8235ee5d7d7..dbd7b78fe01c 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -37,7 +37,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); void efx_init_rx_queue(struct efx_rx_queue *rx_queue); void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); -void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); +void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic); void efx_rx_slow_fill(unsigned long context); void __efx_rx_packet(struct efx_channel *channel); void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, @@ -66,6 +66,9 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); #define EFX_RXQ_MIN_ENT 128U #define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx)) +#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? 
\ + EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE) + /* Filters */ /** @@ -134,17 +137,6 @@ efx_filter_get_filter_safe(struct efx_nic *efx, return efx->type->filter_get_safe(efx, priority, filter_id, spec); } -/** - * efx_farch_filter_clear_rx - remove RX filters by priority - * @efx: NIC from which to remove the filters - * @priority: Maximum priority to remove - */ -static inline void efx_filter_clear_rx(struct efx_nic *efx, - enum efx_filter_priority priority) -{ - return efx->type->filter_clear_rx(efx, priority); -} - static inline u32 efx_filter_count_rx_used(struct efx_nic *efx, enum efx_filter_priority priority) { diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h index 7fdfee019092..75ef7ef6450b 100644 --- a/drivers/net/ethernet/sfc/enum.h +++ b/drivers/net/ethernet/sfc/enum.h @@ -165,6 +165,7 @@ enum reset_type { RESET_TYPE_DMA_ERROR, RESET_TYPE_TX_SKIP, RESET_TYPE_MC_FAILURE, + RESET_TYPE_MC_BIST, RESET_TYPE_MAX, }; diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 1f529fa2edb1..229428915aa8 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -318,6 +318,8 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx, "eventq.int", NULL); } + efx_fill_test(n++, strings, data, &tests->memory, + "core", 0, "memory", NULL); efx_fill_test(n++, strings, data, &tests->registers, "core", 0, "registers", NULL); @@ -357,7 +359,8 @@ static int efx_ethtool_get_sset_count(struct net_device *net_dev, switch (string_set) { case ETH_SS_STATS: return efx->type->describe_stats(efx, NULL) + - EFX_ETHTOOL_SW_STAT_COUNT; + EFX_ETHTOOL_SW_STAT_COUNT + + efx_ptp_describe_stats(efx, NULL); case ETH_SS_TEST: return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL); default: @@ -378,6 +381,8 @@ static void efx_ethtool_get_strings(struct net_device *net_dev, for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) strlcpy(strings + i * ETH_GSTRING_LEN, efx_sw_stat_desc[i].name, ETH_GSTRING_LEN); + strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN; + efx_ptp_describe_stats(efx, strings); break; case ETH_SS_TEST: efx_ethtool_fill_self_tests(efx, NULL, strings, NULL); @@ -427,8 +432,11 @@ static void efx_ethtool_get_stats(struct net_device *net_dev, break; } } + data += EFX_ETHTOOL_SW_STAT_COUNT; spin_unlock_bh(&efx->stats_lock); + + efx_ptp_update_stats(efx, data); } static void efx_ethtool_self_test(struct net_device *net_dev, @@ -583,7 +591,7 @@ static void efx_ethtool_get_ringparam(struct net_device *net_dev, struct efx_nic *efx = netdev_priv(net_dev); ring->rx_max_pending = EFX_MAX_DMAQ_SIZE; - ring->tx_max_pending = EFX_MAX_DMAQ_SIZE; + ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx); ring->rx_pending = efx->rxq_entries; ring->tx_pending = efx->txq_entries; } @@ -596,7 +604,7 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev, if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->rx_pending > EFX_MAX_DMAQ_SIZE || - ring->tx_pending > EFX_MAX_DMAQ_SIZE) + ring->tx_pending > EFX_TXQ_MAX_ENT(efx)) return -EINVAL; if (ring->rx_pending < EFX_RXQ_MIN_ENT) { @@ -1032,7 +1040,7 @@ static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev, struct efx_nic *efx = netdev_priv(net_dev); memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table)); - efx_nic_push_rx_indir_table(efx); + efx->type->rx_push_rss_config(efx); return 0; } diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index ff5d322b9b49..18d6f761f4d0 100644 --- 
a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -469,6 +469,24 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) } /************************************************************************** * + * RSS + * + ************************************************************************** + */ + +static void falcon_b0_rx_push_rss_config(struct efx_nic *efx) +{ + efx_oword_t temp; + + /* Set hash key for IPv4 */ + memcpy(&temp, efx->rx_hash_key, sizeof(temp)); + efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); + + efx_farch_rx_push_indir_table(efx); +} + +/************************************************************************** + * * EEPROM/flash * ************************************************************************** @@ -2247,6 +2265,8 @@ static int falcon_probe_nic(struct efx_nic *efx) struct falcon_board *board; int rc; + efx->primary = efx; /* only one usable function per controller */ + /* Allocate storage for hardware specific data */ nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); if (!nic_data) @@ -2482,9 +2502,7 @@ static int falcon_init_nic(struct efx_nic *efx) falcon_init_rx_cfg(efx); if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { - /* Set hash key for IPv4 */ - memcpy(&temp, efx->rx_hash_key, sizeof(temp)); - efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); + falcon_b0_rx_push_rss_config(efx); /* Set destination of both TX and RX Flush events */ EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0); @@ -2593,6 +2611,14 @@ void falcon_start_nic_stats(struct efx_nic *efx) spin_unlock_bh(&efx->stats_lock); } +/* We don't acutally pull stats on falcon. Wait 10ms so that + * they arrive when we call this just after start_stats + */ +static void falcon_pull_nic_stats(struct efx_nic *efx) +{ + msleep(10); +} + void falcon_stop_nic_stats(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; @@ -2672,6 +2698,7 @@ const struct efx_nic_type falcon_a1_nic_type = { .describe_stats = falcon_describe_nic_stats, .update_stats = falcon_update_nic_stats, .start_stats = falcon_start_nic_stats, + .pull_stats = falcon_pull_nic_stats, .stop_stats = falcon_stop_nic_stats, .set_id_led = falcon_set_id_led, .push_irq_moderation = falcon_push_irq_moderation, @@ -2692,7 +2719,7 @@ const struct efx_nic_type falcon_a1_nic_type = { .tx_init = efx_farch_tx_init, .tx_remove = efx_farch_tx_remove, .tx_write = efx_farch_tx_write, - .rx_push_indir_table = efx_farch_rx_push_indir_table, + .rx_push_rss_config = efx_port_dummy_op_void, .rx_probe = efx_farch_rx_probe, .rx_init = efx_farch_rx_init, .rx_remove = efx_farch_rx_remove, @@ -2765,6 +2792,7 @@ const struct efx_nic_type falcon_b0_nic_type = { .describe_stats = falcon_describe_nic_stats, .update_stats = falcon_update_nic_stats, .start_stats = falcon_start_nic_stats, + .pull_stats = falcon_pull_nic_stats, .stop_stats = falcon_stop_nic_stats, .set_id_led = falcon_set_id_led, .push_irq_moderation = falcon_push_irq_moderation, @@ -2786,7 +2814,7 @@ const struct efx_nic_type falcon_b0_nic_type = { .tx_init = efx_farch_tx_init, .tx_remove = efx_farch_tx_remove, .tx_write = efx_farch_tx_write, - .rx_push_indir_table = efx_farch_rx_push_indir_table, + .rx_push_rss_config = falcon_b0_rx_push_rss_config, .rx_probe = efx_farch_rx_probe, .rx_init = efx_farch_rx_init, .rx_remove = efx_farch_rx_remove, diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index c0907d884d75..f72489a105ca 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -1147,7 +1147,7 @@ 
static void efx_farch_handle_generated_event(struct efx_channel *channel, /* The queue must be empty, so we won't receive any rx * events, so efx_process_channel() won't refill the * queue. Refill it here */ - efx_fast_push_rx_descriptors(rx_queue); + efx_fast_push_rx_descriptors(rx_queue, true); } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) { efx_farch_handle_drain_event(channel); } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) { @@ -1618,8 +1618,7 @@ void efx_farch_rx_push_indir_table(struct efx_nic *efx) size_t i = 0; efx_dword_t dword; - if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) - return; + BUG_ON(efx_nic_rev(efx) < EFX_REV_FALCON_B0); BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != FR_BZ_RX_INDIRECTION_TBL_ROWS); @@ -1745,8 +1744,6 @@ void efx_farch_init_common(struct efx_nic *efx) EFX_INVERT_OWORD(temp); efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); - efx_farch_rx_push_indir_table(efx); - /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. */ @@ -2187,14 +2184,14 @@ efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec, } static void -efx_farch_filter_init_rx_for_stack(struct efx_nic *efx, - struct efx_farch_filter_spec *spec) +efx_farch_filter_init_rx_auto(struct efx_nic *efx, + struct efx_farch_filter_spec *spec) { /* If there's only one channel then disable RSS for non VF * traffic, thereby allowing VFs to use RSS when the PF can't. */ - spec->priority = EFX_FILTER_PRI_REQUIRED; - spec->flags = (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_STACK | + spec->priority = EFX_FILTER_PRI_AUTO; + spec->flags = (EFX_FILTER_FLAG_RX | (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) | (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); spec->dmaq_id = 0; @@ -2459,20 +2456,13 @@ s32 efx_farch_filter_insert(struct efx_nic *efx, rc = -EEXIST; goto out; } - if (spec.priority < saved_spec->priority && - !(saved_spec->priority == EFX_FILTER_PRI_REQUIRED && - saved_spec->flags & EFX_FILTER_FLAG_RX_STACK)) { + if (spec.priority < saved_spec->priority) { rc = -EPERM; goto out; } - if (spec.flags & EFX_FILTER_FLAG_RX_STACK) { - /* Just make sure it won't be removed */ - saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK; - rc = 0; - goto out; - } - /* Retain the RX_STACK flag */ - spec.flags |= saved_spec->flags & EFX_FILTER_FLAG_RX_STACK; + if (saved_spec->priority == EFX_FILTER_PRI_AUTO || + saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) + spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO; } /* Insert the filter */ @@ -2553,11 +2543,11 @@ static int efx_farch_filter_remove(struct efx_nic *efx, struct efx_farch_filter_spec *spec = &table->spec[filter_idx]; if (!test_bit(filter_idx, table->used_bitmap) || - spec->priority > priority) + spec->priority != priority) return -ENOENT; - if (spec->flags & EFX_FILTER_FLAG_RX_STACK) { - efx_farch_filter_init_rx_for_stack(efx, spec); + if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) { + efx_farch_filter_init_rx_auto(efx, spec); efx_farch_filter_push_rx_config(efx); } else { efx_farch_filter_table_clear_entry(efx, table, filter_idx); @@ -2640,12 +2630,15 @@ efx_farch_filter_table_clear(struct efx_nic *efx, unsigned int filter_idx; spin_lock_bh(&efx->filter_lock); - for (filter_idx = 0; filter_idx < table->size; ++filter_idx) - efx_farch_filter_remove(efx, table, filter_idx, priority); + for (filter_idx = 0; filter_idx < table->size; ++filter_idx) { + if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO) + efx_farch_filter_remove(efx, table, + 
filter_idx, priority); + } spin_unlock_bh(&efx->filter_lock); } -void efx_farch_filter_clear_rx(struct efx_nic *efx, +int efx_farch_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority) { efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP, @@ -2654,6 +2647,7 @@ void efx_farch_filter_clear_rx(struct efx_nic *efx, priority); efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF, priority); + return 0; } u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, @@ -2822,7 +2816,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx) for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) { spec = &table->spec[i]; spec->type = EFX_FARCH_FILTER_UC_DEF + i; - efx_farch_filter_init_rx_for_stack(efx, spec); + efx_farch_filter_init_rx_auto(efx, spec); __set_bit(i, table->used_bitmap); } } diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h index 63c77a557178..3ef298d3c47e 100644 --- a/drivers/net/ethernet/sfc/filter.h +++ b/drivers/net/ethernet/sfc/filter.h @@ -59,12 +59,16 @@ enum efx_filter_match_flags { /** * enum efx_filter_priority - priority of a hardware filter specification * @EFX_FILTER_PRI_HINT: Performance hint + * @EFX_FILTER_PRI_AUTO: Automatic filter based on device address list + * or hardware requirements. This may only be used by the filter + * implementation for each NIC type. * @EFX_FILTER_PRI_MANUAL: Manually configured filter * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level * networking and SR-IOV) */ enum efx_filter_priority { EFX_FILTER_PRI_HINT = 0, + EFX_FILTER_PRI_AUTO, EFX_FILTER_PRI_MANUAL, EFX_FILTER_PRI_REQUIRED, }; @@ -78,19 +82,18 @@ enum efx_filter_priority { * according to the indirection table. * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving * queue. - * @EFX_FILTER_FLAG_RX_STACK: Indicates a filter inserted for the - * network stack. The filter must have a priority of - * %EFX_FILTER_PRI_REQUIRED. It can be steered by a replacement - * request with priority %EFX_FILTER_PRI_MANUAL, and a removal - * request with priority %EFX_FILTER_PRI_MANUAL will reset the - * steering (but not remove the filter). + * @EFX_FILTER_FLAG_RX_OVER_AUTO: Indicates a filter that is + * overriding an automatic filter (priority + * %EFX_FILTER_PRI_AUTO). This may only be set by the filter + * implementation for each type. A removal request will restore + * the automatic filter in its place. 
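Taken together, the farch changes above and the EFX_FILTER_PRI_AUTO / EFX_FILTER_FLAG_RX_OVER_AUTO documentation amount to the following rule. This is a simplified restatement for illustration only, not code from the patch:

/* Simplified restatement of the insert-over-existing-filter rule above;
 * illustrative only.  "saved" is the filter already in the table, "spec"
 * is the one being inserted.
 */
static int example_filter_override_check(struct efx_farch_filter_spec *saved,
					 struct efx_farch_filter_spec *spec)
{
	/* A lower-priority request may not displace an existing filter */
	if (spec->priority < saved->priority)
		return -EPERM;

	/* Overriding an automatic filter is allowed, but the replacement is
	 * tagged so that a later removal at the same priority restores the
	 * automatic filter instead of clearing the table entry.
	 */
	if (saved->priority == EFX_FILTER_PRI_AUTO ||
	    (saved->flags & EFX_FILTER_FLAG_RX_OVER_AUTO))
		spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;

	return 0;
}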
* @EFX_FILTER_FLAG_RX: Filter is for RX * @EFX_FILTER_FLAG_TX: Filter is for TX */ enum efx_filter_flags { EFX_FILTER_FLAG_RX_RSS = 0x01, EFX_FILTER_FLAG_RX_SCATTER = 0x02, - EFX_FILTER_FLAG_RX_STACK = 0x04, + EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04, EFX_FILTER_FLAG_RX = 0x08, EFX_FILTER_FLAG_TX = 0x10, }; diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 4b0bd8a1514d..eb59abb57e85 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -42,6 +42,7 @@ struct efx_mcdi_async_param { unsigned int cmd; size_t inlen; size_t outlen; + bool quiet; efx_mcdi_async_completer *complete; unsigned long cookie; /* followed by request/response buffer */ @@ -101,6 +102,10 @@ int efx_mcdi_init(struct efx_nic *efx) netif_err(efx, probe, efx->net_dev, "Host already registered with MCPU\n"); + if (efx->mcdi->fn_flags & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) + efx->primary = efx; + return 0; } @@ -191,6 +196,8 @@ static int efx_mcdi_errno(unsigned int mcdi_err) TRANSLATE_ERROR(EALREADY); TRANSLATE_ERROR(ENOSPC); #undef TRANSLATE_ERROR + case MC_CMD_ERR_ENOTSUP: + return -EOPNOTSUPP; case MC_CMD_ERR_ALLOC_FAIL: return -ENOBUFS; case MC_CMD_ERR_MAC_EXIST: @@ -402,8 +409,9 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout) { struct efx_nic *efx = mcdi->efx; struct efx_mcdi_async_param *async; - size_t hdr_len, data_len; + size_t hdr_len, data_len, err_len; efx_dword_t *outbuf; + MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0); int rc; if (cmpxchg(&mcdi->state, @@ -444,6 +452,13 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout) outbuf = (efx_dword_t *)(async + 1); efx->type->mcdi_read_response(efx, outbuf, hdr_len, min(async->outlen, data_len)); + if (!timeout && rc && !async->quiet) { + err_len = min(sizeof(errbuf), data_len); + efx->type->mcdi_read_response(efx, errbuf, hdr_len, + sizeof(errbuf)); + efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf, + err_len, rc); + } async->complete(efx, async->cookie, rc, outbuf, data_len); kfree(async); @@ -519,18 +534,129 @@ efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen) return 0; } +static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual, bool quiet) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0); + int rc; + + if (mcdi->mode == MCDI_MODE_POLL) + rc = efx_mcdi_poll(efx); + else + rc = efx_mcdi_await_completion(efx); + + if (rc != 0) { + netif_err(efx, hw, efx->net_dev, + "MC command 0x%x inlen %d mode %d timed out\n", + cmd, (int)inlen, mcdi->mode); + + if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) { + netif_err(efx, hw, efx->net_dev, + "MCDI request was completed without an event\n"); + rc = 0; + } + + /* Close the race with efx_mcdi_ev_cpl() executing just too late + * and completing a request we've just cancelled, by ensuring + * that the seqno check therein fails. + */ + spin_lock_bh(&mcdi->iface_lock); + ++mcdi->seqno; + ++mcdi->credits; + spin_unlock_bh(&mcdi->iface_lock); + } + + if (rc != 0) { + if (outlen_actual) + *outlen_actual = 0; + } else { + size_t hdr_len, data_len, err_len; + + /* At the very least we need a memory barrier here to ensure + * we pick up changes from efx_mcdi_ev_cpl(). Protect against + * a spurious efx_mcdi_ev_cpl() running concurrently by + * acquiring the iface_lock. 
*/ + spin_lock_bh(&mcdi->iface_lock); + rc = mcdi->resprc; + hdr_len = mcdi->resp_hdr_len; + data_len = mcdi->resp_data_len; + err_len = min(sizeof(errbuf), data_len); + spin_unlock_bh(&mcdi->iface_lock); + + BUG_ON(rc > 0); + + efx->type->mcdi_read_response(efx, outbuf, hdr_len, + min(outlen, data_len)); + if (outlen_actual) + *outlen_actual = data_len; + + efx->type->mcdi_read_response(efx, errbuf, hdr_len, err_len); + + if (cmd == MC_CMD_REBOOT && rc == -EIO) { + /* Don't reset if MC_CMD_REBOOT returns EIO */ + } else if (rc == -EIO || rc == -EINTR) { + netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n", + -rc); + efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); + } else if (rc && !quiet) { + efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len, + rc); + } + + if (rc == -EIO || rc == -EINTR) { + msleep(MCDI_STATUS_SLEEP_MS); + efx_mcdi_poll_reboot(efx); + mcdi->new_epoch = true; + } + } + + efx_mcdi_release(mcdi); + return rc; +} + +static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual, bool quiet) +{ + int rc; + + rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen); + if (rc) { + if (outlen_actual) + *outlen_actual = 0; + return rc; + } + return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, + outlen_actual, quiet); +} + int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, size_t inlen, efx_dword_t *outbuf, size_t outlen, size_t *outlen_actual) { - int rc; + return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen, + outlen_actual, false); +} - rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen); - if (rc) - return rc; - return efx_mcdi_rpc_finish(efx, cmd, inlen, - outbuf, outlen, outlen_actual); +/* Normally, on receiving an error code in the MCDI response, + * efx_mcdi_rpc will log an error message containing (among other + * things) the raw error code, by means of efx_mcdi_display_error. + * This _quiet version suppresses that; if the caller wishes to log + * the error conditionally on the return code, it should call this + * function and is then responsible for calling efx_mcdi_display_error + * as needed. + */ +int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual) +{ + return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen, + outlen_actual, true); } int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, @@ -543,35 +669,19 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, if (rc) return rc; + if (efx->mc_bist_for_other_fn) + return -ENETDOWN; + efx_mcdi_acquire_sync(mcdi); efx_mcdi_send_request(efx, cmd, inbuf, inlen); return 0; } -/** - * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously - * @efx: NIC through which to issue the command - * @cmd: Command type number - * @inbuf: Command parameters - * @inlen: Length of command parameters, in bytes - * @outlen: Length to allocate for response buffer, in bytes - * @complete: Function to be called on completion or cancellation. - * @cookie: Arbitrary value to be passed to @complete. - * - * This function does not sleep and therefore may be called in atomic - * context. It will fail if event queues are disabled or if MCDI - * event completions have been disabled due to an error. 
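A hedged sketch of the calling pattern that the _quiet comment above describes. Everything here other than the helpers introduced by this patch (efx_mcdi_rpc_quiet(), efx_mcdi_display_error(), MCDI_DECLARE_BUF_OUT_OR_ERR()) is illustrative, including the choice of -ENOENT as the error worth suppressing:

/* Illustrative only: issue a request whose failure may be expected, and log
 * only the unexpected failures.  "cmd", "inbuf" and "inlen" stand for any
 * request the caller has already built.
 */
static int example_try_optional_cmd(struct efx_nic *efx, unsigned int cmd,
				    const efx_dword_t *inbuf, size_t inlen)
{
	MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc_quiet(efx, cmd, inbuf, inlen,
				outbuf, sizeof(outbuf), &outlen);
	if (rc == -ENOENT)
		return 0;		/* treated as benign in this example */
	if (rc)				/* anything else is worth reporting */
		efx_mcdi_display_error(efx, cmd, inlen, outbuf, outlen, rc);
	return rc;
}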
- * - * If it succeeds, the @complete function will be called exactly once - * in atomic context, when one of the following occurs: - * (a) the completion event is received (in NAPI context) - * (b) event queues are disabled (in the process that disables them) - * (c) the request times-out (in timer context) - */ -int -efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, - const efx_dword_t *inbuf, size_t inlen, size_t outlen, - efx_mcdi_async_completer *complete, unsigned long cookie) +static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + size_t outlen, + efx_mcdi_async_completer *complete, + unsigned long cookie, bool quiet) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); struct efx_mcdi_async_param *async; @@ -581,6 +691,9 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, if (rc) return rc; + if (efx->mc_bist_for_other_fn) + return -ENETDOWN; + async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4), GFP_ATOMIC); if (!async) @@ -589,6 +702,7 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, async->cmd = cmd; async->inlen = inlen; async->outlen = outlen; + async->quiet = quiet; async->complete = complete; async->cookie = cookie; memcpy(async + 1, inbuf, inlen); @@ -617,79 +731,73 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, return rc; } +/** + * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously + * @efx: NIC through which to issue the command + * @cmd: Command type number + * @inbuf: Command parameters + * @inlen: Length of command parameters, in bytes + * @outlen: Length to allocate for response buffer, in bytes + * @complete: Function to be called on completion or cancellation. + * @cookie: Arbitrary value to be passed to @complete. + * + * This function does not sleep and therefore may be called in atomic + * context. It will fail if event queues are disabled or if MCDI + * event completions have been disabled due to an error. 
+ * + * If it succeeds, the @complete function will be called exactly once + * in atomic context, when one of the following occurs: + * (a) the completion event is received (in NAPI context) + * (b) event queues are disabled (in the process that disables them) + * (c) the request times-out (in timer context) + */ +int +efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, size_t outlen, + efx_mcdi_async_completer *complete, unsigned long cookie) +{ + return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete, + cookie, false); +} + +int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + size_t outlen, efx_mcdi_async_completer *complete, + unsigned long cookie) +{ + return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete, + cookie, true); +} + int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, efx_dword_t *outbuf, size_t outlen, size_t *outlen_actual) { - struct efx_mcdi_iface *mcdi = efx_mcdi(efx); - int rc; - - if (mcdi->mode == MCDI_MODE_POLL) - rc = efx_mcdi_poll(efx); - else - rc = efx_mcdi_await_completion(efx); - - if (rc != 0) { - netif_err(efx, hw, efx->net_dev, - "MC command 0x%x inlen %d mode %d timed out\n", - cmd, (int)inlen, mcdi->mode); - - if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) { - netif_err(efx, hw, efx->net_dev, - "MCDI request was completed without an event\n"); - rc = 0; - } - - /* Close the race with efx_mcdi_ev_cpl() executing just too late - * and completing a request we've just cancelled, by ensuring - * that the seqno check therein fails. - */ - spin_lock_bh(&mcdi->iface_lock); - ++mcdi->seqno; - ++mcdi->credits; - spin_unlock_bh(&mcdi->iface_lock); - } - - if (rc == 0) { - size_t hdr_len, data_len; - - /* At the very least we need a memory barrier here to ensure - * we pick up changes from efx_mcdi_ev_cpl(). Protect against - * a spurious efx_mcdi_ev_cpl() running concurrently by - * acquiring the iface_lock. 
*/ - spin_lock_bh(&mcdi->iface_lock); - rc = mcdi->resprc; - hdr_len = mcdi->resp_hdr_len; - data_len = mcdi->resp_data_len; - spin_unlock_bh(&mcdi->iface_lock); - - BUG_ON(rc > 0); + return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, + outlen_actual, false); +} - if (rc == 0) { - efx->type->mcdi_read_response(efx, outbuf, hdr_len, - min(outlen, data_len)); - if (outlen_actual != NULL) - *outlen_actual = data_len; - } else if (cmd == MC_CMD_REBOOT && rc == -EIO) - ; /* Don't reset if MC_CMD_REBOOT returns EIO */ - else if (rc == -EIO || rc == -EINTR) { - netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n", - -rc); - efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); - } else - netif_dbg(efx, hw, efx->net_dev, - "MC command 0x%x inlen %d failed rc=%d\n", - cmd, (int)inlen, -rc); +int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual) +{ + return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, + outlen_actual, true); +} - if (rc == -EIO || rc == -EINTR) { - msleep(MCDI_STATUS_SLEEP_MS); - efx_mcdi_poll_reboot(efx); - mcdi->new_epoch = true; - } - } +void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd, + size_t inlen, efx_dword_t *outbuf, + size_t outlen, int rc) +{ + int code = 0, err_arg = 0; - efx_mcdi_release(mcdi); - return rc; + if (outlen >= MC_CMD_ERR_CODE_OFST + 4) + code = MCDI_DWORD(outbuf, ERR_CODE); + if (outlen >= MC_CMD_ERR_ARG_OFST + 4) + err_arg = MCDI_DWORD(outbuf, ERR_ARG); + netif_err(efx, hw, efx->net_dev, + "MC command 0x%x inlen %d failed rc=%d (raw=%d) arg=%d\n", + cmd, (int)inlen, rc, code, err_arg); } /* Switch to polled MCDI completions. This can be called in various @@ -834,6 +942,30 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) spin_unlock(&mcdi->iface_lock); } +/* The MC is going down in to BIST mode. set the BIST flag to block + * new MCDI, cancel any outstanding MCDI and and schedule a BIST-type reset + * (which doesn't actually execute a reset, it waits for the controlling + * function to reset it). + */ +static void efx_mcdi_ev_bist(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + spin_lock(&mcdi->iface_lock); + efx->mc_bist_for_other_fn = true; + if (efx_mcdi_complete_sync(mcdi)) { + if (mcdi->mode == MCDI_MODE_EVENTS) { + mcdi->resprc = -EIO; + mcdi->resp_hdr_len = 0; + mcdi->resp_data_len = 0; + ++mcdi->credits; + } + } + mcdi->new_epoch = true; + efx_schedule_reset(efx, RESET_TYPE_MC_BIST); + spin_unlock(&mcdi->iface_lock); +} + /* Called from falcon_process_eventq for MCDI events */ void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event) @@ -867,14 +999,18 @@ void efx_mcdi_process_event(struct efx_channel *channel, efx_mcdi_sensor_event(efx, event); break; case MCDI_EVENT_CODE_SCHEDERR: - netif_info(efx, hw, efx->net_dev, - "MC Scheduler error address=0x%x\n", data); + netif_dbg(efx, hw, efx->net_dev, + "MC Scheduler alert (0x%x)\n", data); break; case MCDI_EVENT_CODE_REBOOT: case MCDI_EVENT_CODE_MC_REBOOT: netif_info(efx, hw, efx->net_dev, "MC Reboot\n"); efx_mcdi_ev_death(efx, -EIO); break; + case MCDI_EVENT_CODE_MC_BIST: + netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n"); + efx_mcdi_ev_bist(efx); + break; case MCDI_EVENT_CODE_MAC_STATS_DMA: /* MAC stats are gather lazily. We can ignore this. 
*/ break; @@ -886,6 +1022,9 @@ void efx_mcdi_process_event(struct efx_channel *channel, case MCDI_EVENT_CODE_PTP_PPS: efx_ptp_event(efx, event); break; + case MCDI_EVENT_CODE_PTP_TIME: + efx_time_sync_event(channel, event); + break; case MCDI_EVENT_CODE_TX_FLUSH: case MCDI_EVENT_CODE_RX_FLUSH: /* Two flush events will be sent: one to the same event @@ -1000,13 +1139,27 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, goto fail; } + if (driver_operating) { + if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) { + efx->mcdi->fn_flags = + MCDI_DWORD(outbuf, + DRV_ATTACH_EXT_OUT_FUNC_FLAGS); + } else { + /* Synthesise flags for Siena */ + efx->mcdi->fn_flags = + 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL | + 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED | + (efx_port_num(efx) == 0) << + MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY; + } + } + /* We currently assume we have control of the external link * and are completely trusted by firmware. Abort probing * if that's not true for this function. */ if (driver_operating && - outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN && - (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) & + (efx->mcdi->fn_flags & (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL | 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) != (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL | @@ -1097,13 +1250,6 @@ int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf), NULL, 0, NULL); - if (rc) - goto fail; - - return 0; - -fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -1220,7 +1366,7 @@ fail1: static int efx_mcdi_read_assertion(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN); - MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN); + MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN); unsigned int flags, index; const char *reason; size_t outlen; @@ -1235,13 +1381,17 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx) retry = 2; do { MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1); - rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS, - inbuf, MC_CMD_GET_ASSERTS_IN_LEN, - outbuf, sizeof(outbuf), &outlen); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS, + inbuf, MC_CMD_GET_ASSERTS_IN_LEN, + outbuf, sizeof(outbuf), &outlen); } while ((rc == -EINTR || rc == -EIO) && retry-- > 0); - if (rc) + if (rc) { + efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS, + MC_CMD_GET_ASSERTS_IN_LEN, outbuf, + outlen, rc); return rc; + } if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN) return -EIO; @@ -1319,17 +1469,18 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), NULL, 0, NULL); - if (rc) - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", - __func__, rc); } -static int efx_mcdi_reset_port(struct efx_nic *efx) +static int efx_mcdi_reset_func(struct efx_nic *efx) { - int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL); - if (rc) - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", - __func__, rc); + MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN); + int rc; + + BUILD_BUG_ON(MC_CMD_ENTITY_RESET_OUT_LEN != 0); + MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG, + ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1); + rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf), + NULL, 0, NULL); return rc; } @@ -1347,7 +1498,6 @@ static int efx_mcdi_reset_mc(struct efx_nic *efx) return 0; if (rc == 0) rc = -EIO; - netif_err(efx, hw, 
efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -1368,7 +1518,7 @@ int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method) if (method == RESET_TYPE_WORLD) return efx_mcdi_reset_mc(efx); else - return efx_mcdi_reset_port(efx); + return efx_mcdi_reset_func(efx); } static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, @@ -1449,13 +1599,6 @@ int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf), NULL, 0, NULL); - if (rc) - goto fail; - - return 0; - -fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -1496,13 +1639,6 @@ int efx_mcdi_wol_filter_reset(struct efx_nic *efx) int rc; rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL); - if (rc) - goto fail; - - return 0; - -fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -1532,13 +1668,6 @@ static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), NULL, 0, NULL); - if (rc) - goto fail; - - return 0; - -fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -1558,14 +1687,10 @@ static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); if (rc) - goto fail; + return rc; memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length); return 0; - -fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); - return rc; } static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, @@ -1585,13 +1710,6 @@ static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), NULL, 0, NULL); - if (rc) - goto fail; - - return 0; - -fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -1609,13 +1727,6 @@ static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), NULL, 0, NULL); - if (rc) - goto fail; - - return 0; - -fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -1630,13 +1741,6 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf), NULL, 0, NULL); - if (rc) - goto fail; - - return 0; - -fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 15816cacb548..52931aebf3c3 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -94,12 +94,14 @@ struct efx_mcdi_mtd_partition { * struct efx_mcdi_data - extra state for NICs that implement MCDI * @iface: Interface/protocol state * @hwmon: Hardware monitor state + * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH. 
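A rough illustration of how the cached value might be consulted. These helpers are not part of the patch, but the bit numbers are the MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_* values used above:

/* Illustrative helpers only: decode the cached DRV_ATTACH flags.  efx->mcdi
 * is assumed to be non-NULL, as it is for any NIC type that has completed
 * efx_mcdi_init().
 */
static inline bool example_mcdi_is_primary(struct efx_nic *efx)
{
	return efx->mcdi->fn_flags &
	       (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY);
}

static inline bool example_mcdi_has_link_control(struct efx_nic *efx)
{
	return efx->mcdi->fn_flags &
	       (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL);
}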
*/ struct efx_mcdi_data { struct efx_mcdi_iface iface; #ifdef CONFIG_SFC_MCDI_MON struct efx_mcdi_mon hwmon; #endif + u32 fn_flags; }; #ifdef CONFIG_SFC_MCDI_MON @@ -116,12 +118,19 @@ void efx_mcdi_fini(struct efx_nic *efx); int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, size_t inlen, efx_dword_t *outbuf, size_t outlen, size_t *outlen_actual); +int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual); int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, size_t inlen); int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, efx_dword_t *outbuf, size_t outlen, size_t *outlen_actual); +int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, + size_t inlen, efx_dword_t *outbuf, + size_t outlen, size_t *outlen_actual); typedef void efx_mcdi_async_completer(struct efx_nic *efx, unsigned long cookie, int rc, @@ -131,6 +140,15 @@ int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, const efx_dword_t *inbuf, size_t inlen, size_t outlen, efx_mcdi_async_completer *complete, unsigned long cookie); +int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + size_t outlen, + efx_mcdi_async_completer *complete, + unsigned long cookie); + +void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd, + size_t inlen, efx_dword_t *outbuf, + size_t outlen, int rc); int efx_mcdi_poll_reboot(struct efx_nic *efx); void efx_mcdi_mode_poll(struct efx_nic *efx); @@ -147,6 +165,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); */ #define MCDI_DECLARE_BUF(_name, _len) \ efx_dword_t _name[DIV_ROUND_UP(_len, 4)] +#define MCDI_DECLARE_BUF_OUT_OR_ERR(_name, _len) \ + MCDI_DECLARE_BUF(_name, max_t(size_t, _len, 8)) #define _MCDI_PTR(_buf, _offset) \ ((u8 *)(_buf) + (_offset)) #define MCDI_PTR(_buf, _field) \ @@ -301,6 +321,7 @@ int efx_mcdi_set_mac(struct efx_nic *efx); #define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1)) void efx_mcdi_mac_start_stats(struct efx_nic *efx); void efx_mcdi_mac_stop_stats(struct efx_nic *efx); +void efx_mcdi_mac_pull_stats(struct efx_nic *efx); bool efx_mcdi_mac_check_fault(struct efx_nic *efx); enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason); int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method); diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c index d72ad4fc3617..bc27d5b580f5 100644 --- a/drivers/net/ethernet/sfc/mcdi_mon.c +++ b/drivers/net/ethernet/sfc/mcdi_mon.c @@ -24,6 +24,15 @@ enum efx_hwmon_type { EFX_HWMON_IN, /* voltage */ EFX_HWMON_CURR, /* current */ EFX_HWMON_POWER, /* power */ + EFX_HWMON_TYPES_COUNT +}; + +static const char *const efx_hwmon_unit[EFX_HWMON_TYPES_COUNT] = { + [EFX_HWMON_TEMP] = " degC", + [EFX_HWMON_COOL] = " rpm", /* though nonsense for a heatsink */ + [EFX_HWMON_IN] = " mV", + [EFX_HWMON_CURR] = " mA", + [EFX_HWMON_POWER] = " W", }; static const struct { @@ -33,13 +42,13 @@ static const struct { } efx_mcdi_sensor_type[] = { #define SENSOR(name, label, hwmon_type, port) \ [MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port } - SENSOR(CONTROLLER_TEMP, "Controller ext. 
temp.", TEMP, -1), + SENSOR(CONTROLLER_TEMP, "Controller board temp.", TEMP, -1), SENSOR(PHY_COMMON_TEMP, "PHY temp.", TEMP, -1), - SENSOR(CONTROLLER_COOLING, "Controller cooling", COOL, -1), + SENSOR(CONTROLLER_COOLING, "Controller heat sink", COOL, -1), SENSOR(PHY0_TEMP, "PHY temp.", TEMP, 0), - SENSOR(PHY0_COOLING, "PHY cooling", COOL, 0), + SENSOR(PHY0_COOLING, "PHY heat sink", COOL, 0), SENSOR(PHY1_TEMP, "PHY temp.", TEMP, 1), - SENSOR(PHY1_COOLING, "PHY cooling", COOL, 1), + SENSOR(PHY1_COOLING, "PHY heat sink", COOL, 1), SENSOR(IN_1V0, "1.0V supply", IN, -1), SENSOR(IN_1V2, "1.2V supply", IN, -1), SENSOR(IN_1V8, "1.8V supply", IN, -1), @@ -47,36 +56,42 @@ static const struct { SENSOR(IN_3V3, "3.3V supply", IN, -1), SENSOR(IN_12V0, "12.0V supply", IN, -1), SENSOR(IN_1V2A, "1.2V analogue supply", IN, -1), - SENSOR(IN_VREF, "ref. voltage", IN, -1), - SENSOR(OUT_VAOE, "AOE power supply", IN, -1), - SENSOR(AOE_TEMP, "AOE temp.", TEMP, -1), - SENSOR(PSU_AOE_TEMP, "AOE PSU temp.", TEMP, -1), - SENSOR(PSU_TEMP, "Controller PSU temp.", TEMP, -1), - SENSOR(FAN_0, NULL, COOL, -1), - SENSOR(FAN_1, NULL, COOL, -1), - SENSOR(FAN_2, NULL, COOL, -1), - SENSOR(FAN_3, NULL, COOL, -1), - SENSOR(FAN_4, NULL, COOL, -1), + SENSOR(IN_VREF, "Ref. voltage", IN, -1), + SENSOR(OUT_VAOE, "AOE FPGA supply", IN, -1), + SENSOR(AOE_TEMP, "AOE FPGA temp.", TEMP, -1), + SENSOR(PSU_AOE_TEMP, "AOE regulator temp.", TEMP, -1), + SENSOR(PSU_TEMP, "Controller regulator temp.", + TEMP, -1), + SENSOR(FAN_0, "Fan 0", COOL, -1), + SENSOR(FAN_1, "Fan 1", COOL, -1), + SENSOR(FAN_2, "Fan 2", COOL, -1), + SENSOR(FAN_3, "Fan 3", COOL, -1), + SENSOR(FAN_4, "Fan 4", COOL, -1), SENSOR(IN_VAOE, "AOE input supply", IN, -1), SENSOR(OUT_IAOE, "AOE output current", CURR, -1), SENSOR(IN_IAOE, "AOE input current", CURR, -1), SENSOR(NIC_POWER, "Board power use", POWER, -1), SENSOR(IN_0V9, "0.9V supply", IN, -1), - SENSOR(IN_I0V9, "0.9V input current", CURR, -1), - SENSOR(IN_I1V2, "1.2V input current", CURR, -1), - SENSOR(IN_0V9_ADC, "0.9V supply (at ADC)", IN, -1), - SENSOR(CONTROLLER_2_TEMP, "Controller ext. temp. 2", TEMP, -1), - SENSOR(VREG_INTERNAL_TEMP, "Voltage regulator temp.", TEMP, -1), + SENSOR(IN_I0V9, "0.9V supply current", CURR, -1), + SENSOR(IN_I1V2, "1.2V supply current", CURR, -1), + SENSOR(IN_0V9_ADC, "0.9V supply (ext. ADC)", IN, -1), + SENSOR(CONTROLLER_2_TEMP, "Controller board temp. 2", TEMP, -1), + SENSOR(VREG_INTERNAL_TEMP, "Regulator die temp.", TEMP, -1), SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", TEMP, -1), SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", TEMP, -1), - SENSOR(CONTROLLER_VPTAT, "Controller int. temp. raw", IN, -1), - SENSOR(CONTROLLER_INTERNAL_TEMP, "Controller int. temp.", TEMP, -1), + SENSOR(CONTROLLER_VPTAT, + "Controller PTAT voltage (int. ADC)", IN, -1), + SENSOR(CONTROLLER_INTERNAL_TEMP, + "Controller die temp. (int. ADC)", TEMP, -1), SENSOR(CONTROLLER_VPTAT_EXTADC, - "Controller int. temp. raw (at ADC)", IN, -1), + "Controller PTAT voltage (ext. ADC)", IN, -1), SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC, - "Controller int. temp. (via ADC)", TEMP, -1), + "Controller die temp. (ext. ADC)", TEMP, -1), SENSOR(AMBIENT_TEMP, "Ambient temp.", TEMP, -1), SENSOR(AIRFLOW, "Air flow raw", IN, -1), + SENSOR(VDD08D_VSS08D_CSR, "0.9V die (int. ADC)", IN, -1), + SENSOR(VDD08D_VSS08D_CSR_EXTADC, "0.9V die (ext. ADC)", IN, -1), + SENSOR(HOTPOINT_TEMP, "Controller board temp. 
(hotpoint)", TEMP, -1), #undef SENSOR }; @@ -91,7 +106,8 @@ static const char *const sensor_status_names[] = { void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev) { unsigned int type, state, value; - const char *name = NULL, *state_txt; + enum efx_hwmon_type hwmon_type = EFX_HWMON_UNKNOWN; + const char *name = NULL, *state_txt, *unit; type = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR); state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE); @@ -99,16 +115,22 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev) /* Deal gracefully with the board having more drivers than we * know about, but do not expect new sensor states. */ - if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) + if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) { name = efx_mcdi_sensor_type[type].label; + hwmon_type = efx_mcdi_sensor_type[type].hwmon_type; + } if (!name) name = "No sensor name available"; EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names)); state_txt = sensor_status_names[state]; + EFX_BUG_ON_PARANOID(hwmon_type >= EFX_HWMON_TYPES_COUNT); + unit = efx_hwmon_unit[hwmon_type]; + if (!unit) + unit = ""; netif_err(efx, hw, efx->net_dev, - "Sensor %d (%s) reports condition '%s' for raw value %d\n", - type, name, state_txt, value); + "Sensor %d (%s) reports condition '%s' for value %d%s\n", + type, name, state_txt, value, unit); } #ifdef CONFIG_SFC_MCDI_MON diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index e0a63ddb7a6c..a707fb5ef14c 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -224,6 +224,8 @@ #define MC_CMD_ERR_MAC_EXIST 0x1009 /* Slave core not present */ #define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a +/* The datapath is disabled. */ +#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b #define MC_CMD_ERR_CODE_OFST 0 @@ -390,6 +392,8 @@ * AOE_ERR_DATA) */ #define MCDI_EVENT_AOE_BYTEBLASTER 0x9 +/* enum: DDR ECC status update */ +#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa #define MCDI_EVENT_AOE_ERR_DATA_LBN 8 #define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8 #define MCDI_EVENT_RX_ERR_RXQ_LBN 0 @@ -462,6 +466,10 @@ #define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17 /* enum: the MC has detected an uncorrectable error */ #define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18 +/* enum: The MC has entered offline BIST mode */ +#define MCDI_EVENT_CODE_MC_BIST 0x19 +/* enum: PTP tick event providing current NIC time */ +#define MCDI_EVENT_CODE_PTP_TIME 0x1a /* enum: Artificial event generated by host and posted via MC for test * purposes. 
*/ @@ -481,15 +489,32 @@ #define MCDI_EVENT_TX_ERR_DATA_OFST 0 #define MCDI_EVENT_TX_ERR_DATA_LBN 0 #define MCDI_EVENT_TX_ERR_DATA_WIDTH 32 -/* Seconds field of timestamp */ +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of + * timestamp + */ #define MCDI_EVENT_PTP_SECONDS_OFST 0 #define MCDI_EVENT_PTP_SECONDS_LBN 0 #define MCDI_EVENT_PTP_SECONDS_WIDTH 32 -/* Nanoseconds field of timestamp */ +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of + * timestamp + */ +#define MCDI_EVENT_PTP_MAJOR_OFST 0 +#define MCDI_EVENT_PTP_MAJOR_LBN 0 +#define MCDI_EVENT_PTP_MAJOR_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field + * of timestamp + */ #define MCDI_EVENT_PTP_NANOSECONDS_OFST 0 #define MCDI_EVENT_PTP_NANOSECONDS_LBN 0 #define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32 -/* Lowest four bytes of sourceUUID from PTP packet */ +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of + * timestamp + */ +#define MCDI_EVENT_PTP_MINOR_OFST 0 +#define MCDI_EVENT_PTP_MINOR_LBN 0 +#define MCDI_EVENT_PTP_MINOR_WIDTH 32 +/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet + */ #define MCDI_EVENT_PTP_UUID_OFST 0 #define MCDI_EVENT_PTP_UUID_LBN 0 #define MCDI_EVENT_PTP_UUID_WIDTH 32 @@ -505,6 +530,13 @@ #define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0 #define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0 #define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32 +/* For CODE_PTP_TIME events, the major value of the PTP clock */ +#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0 +#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0 +#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32 +/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */ +#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36 +#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8 /* FCDI_EVENT structuredef */ #define FCDI_EVENT_LEN 8 @@ -545,8 +577,10 @@ #define FCDI_EVENT_CODE_TIMED_READ 0x5 /* enum: One or more PPS IN events */ #define FCDI_EVENT_CODE_PPS_IN 0x6 -/* enum: One or more PPS OUT events */ -#define FCDI_EVENT_CODE_PPS_OUT 0x7 +/* enum: Tick event from PTP clock */ +#define FCDI_EVENT_CODE_PTP_TICK 0x7 +/* enum: ECC error counters */ +#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8 #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0 #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0 #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32 @@ -560,14 +594,21 @@ #define FCDI_EVENT_LINK_STATE_DATA_OFST 0 #define FCDI_EVENT_LINK_STATE_DATA_LBN 0 #define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32 -#define FCDI_EVENT_PPS_COUNT_OFST 0 -#define FCDI_EVENT_PPS_COUNT_LBN 0 -#define FCDI_EVENT_PPS_COUNT_WIDTH 32 - -/* FCDI_EXTENDED_EVENT structuredef */ -#define FCDI_EXTENDED_EVENT_LENMIN 16 -#define FCDI_EXTENDED_EVENT_LENMAX 248 -#define FCDI_EXTENDED_EVENT_LEN(num) (8+8*(num)) +#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36 +#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8 +#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0 +#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0 +#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32 + +/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events + * to the MC. Note that this structure | is overlayed over a normal FCDI event + * such that bits 32-63 containing | event code, level, source etc remain the + * same. 
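A minimal sketch of decoding the new CODE_PTP_TIME event, assuming only the field definitions above. Reconstructing the full minor value from a recent time sync is deliberately left out, since that logic lives in ptp.c and is not part of this header:

/* Illustrative only: extract the fields carried by an MCDI PTP_TIME event.
 * The event holds the full major value but only bits 26:19 of the minor
 * value; the host has to merge (minor_26_19 << 19) with minor bits learned
 * from a recent MC_CMD_PTP time sync (not shown here).
 */
static void example_decode_ptp_time_event(efx_qword_t *ev,
					  u32 *major, u32 *minor_26_19)
{
	*major = EFX_QWORD_FIELD(*ev, MCDI_EVENT_PTP_TIME_MAJOR);
	*minor_26_19 = EFX_QWORD_FIELD(*ev, MCDI_EVENT_PTP_TIME_MINOR_26_19);
}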
In this case the data | field of the header is defined to be the + * number of timestamps + */ +#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16 +#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248 +#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num)) /* Number of timestamps following */ #define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0 #define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0 @@ -581,14 +622,14 @@ #define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96 #define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32 /* Timestamp records comprising the event */ -#define FCDI_EXTENDED_EVENT_PPS_TIME_OFST 8 -#define FCDI_EXTENDED_EVENT_PPS_TIME_LEN 8 -#define FCDI_EXTENDED_EVENT_PPS_TIME_LO_OFST 8 -#define FCDI_EXTENDED_EVENT_PPS_TIME_HI_OFST 12 -#define FCDI_EXTENDED_EVENT_PPS_TIME_MINNUM 1 -#define FCDI_EXTENDED_EVENT_PPS_TIME_MAXNUM 30 -#define FCDI_EXTENDED_EVENT_PPS_TIME_LBN 64 -#define FCDI_EXTENDED_EVENT_PPS_TIME_WIDTH 64 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64 /***********************************/ @@ -642,6 +683,10 @@ #define MC_CMD_COPYCODE_IN_LEN 16 /* Source address */ #define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 +/* enum: The main image should be entered via a copy of a single word from and + * to this address when none of the other magic behaviours are required. + */ +#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000 /* enum: Entering the main image via a copy of a single word from and to this * address indicates that it should not attempt to start the datapath CPUs. * This is useful for certain soft rebooting scenarios. (Huntington only) @@ -872,8 +917,28 @@ #define MC_CMD_PTP_OP_RST_CLK 0x14 /* enum: Enable the forwarding of PPS events to the host */ #define MC_CMD_PTP_OP_PPS_ENABLE 0x15 +/* enum: Get the time format used by this NIC for PTP operations */ +#define MC_CMD_PTP_OP_GET_TIME_FORMAT 0x16 +/* enum: Get the clock attributes. NOTE- extended version of + * MC_CMD_PTP_OP_GET_TIME_FORMAT + */ +#define MC_CMD_PTP_OP_GET_ATTRIBUTES 0x16 +/* enum: Get corrections that should be applied to the various different + * timestamps + */ +#define MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS 0x17 +/* enum: Subscribe to receive periodic time events indicating the current NIC + * time + */ +#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE 0x18 +/* enum: Unsubscribe to stop receiving time events */ +#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19 +/* enum: PPS based manfacturing tests. Requires PPS output to be looped to PPS + * input on the same NIC. + */ +#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a /* enum: Above this for future use. 
*/ -#define MC_CMD_PTP_OP_MAX 0x16 +#define MC_CMD_PTP_OP_MAX 0x1b /* MC_CMD_PTP_IN_ENABLE msgrequest */ #define MC_CMD_PTP_IN_ENABLE_LEN 16 @@ -938,8 +1003,12 @@ #define MC_CMD_PTP_IN_ADJUST_BITS 0x28 /* Time adjustment in seconds */ #define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16 /* Time adjustment in nanoseconds */ #define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20 /* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */ #define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20 @@ -1005,8 +1074,12 @@ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ /* Time adjustment in seconds */ #define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8 /* Time adjustment in nanoseconds */ #define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12 /* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */ #define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16 @@ -1078,9 +1151,51 @@ #define MC_CMD_PTP_ENABLE_PPS 0x0 /* enum: Disable */ #define MC_CMD_PTP_DISABLE_PPS 0x1 -/* Queueid to send events back */ +/* Queue id to send events back */ #define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8 +/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */ +#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ + +/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */ +#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ + +/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */ +#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ + +/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */ +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* Event queue to send PTP time events to */ +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8 + +/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* Unsubscribe options */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8 +/* enum: Unsubscribe a single queue */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0 +/* enum: Unsubscribe all queues */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1 +/* Event queue ID */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12 + +/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */ +#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* 1 to enable PPS test mode, 0 to disable and return result. 
*/ +#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8 + /* MC_CMD_PTP_OUT msgresponse */ #define MC_CMD_PTP_OUT_LEN 0 @@ -1088,15 +1203,29 @@ #define MC_CMD_PTP_OUT_TRANSMIT_LEN 8 /* Value of seconds timestamp */ #define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0 /* Value of nanoseconds timestamp */ #define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4 + +/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */ +#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0 + +/* MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE msgresponse */ +#define MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE_LEN 0 /* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */ #define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8 /* Value of seconds timestamp */ #define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0 /* Value of nanoseconds timestamp */ #define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4 /* MC_CMD_PTP_OUT_STATUS msgresponse */ #define MC_CMD_PTP_OUT_STATUS_LEN 64 @@ -1116,21 +1245,21 @@ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24 /* Number of PPS bad periods */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28 -/* Minimum period of PPS pulse */ +/* Minimum period of PPS pulse in nanoseconds */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32 -/* Maximum period of PPS pulse */ +/* Maximum period of PPS pulse in nanoseconds */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36 -/* Last period of PPS pulse */ +/* Last period of PPS pulse in nanoseconds */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40 -/* Mean period of PPS pulse */ +/* Mean period of PPS pulse in nanoseconds */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44 -/* Minimum offset of PPS pulse (signed) */ +/* Minimum offset of PPS pulse in nanoseconds (signed) */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48 -/* Maximum offset of PPS pulse (signed) */ +/* Maximum offset of PPS pulse in nanoseconds (signed) */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52 -/* Last offset of PPS pulse (signed) */ +/* Last offset of PPS pulse in nanoseconds (signed) */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56 -/* Mean offset of PPS pulse (signed) */ +/* Mean offset of PPS pulse in nanoseconds (signed) */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60 /* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */ @@ -1146,8 +1275,12 @@ #define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0 /* Value of seconds timestamp */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4 /* Value of nanoseconds timestamp */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8 /* Host time immediately after NIC's hardware clock read */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12 /* Number of nanoseconds waited after reading NIC's hardware clock */ @@ -1177,6 +1310,16 @@ #define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8 /* enum: Timestamp trigger GPIO not working */ #define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9 +/* enum: Insufficient PPS events to perform checks */ +#define MC_CMD_PTP_MANF_PPS_ENOUGH 0xa +/* enum: PPS time event period not sufficiently close to 1s. 
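The MAJOR/MINOR aliases above are format-neutral; their units come from the time format reported by GET_TIME_FORMAT/GET_ATTRIBUTES (see the enums further down). A minimal conversion sketch under that assumption, ignoring rounding:

/* Illustrative only: convert a (major, minor) timestamp pair into seconds
 * and nanoseconds according to the advertised PTP time format.  Needs
 * <linux/math64.h> for div_u64*() and NSEC_PER_SEC from <linux/time.h>.
 */
static void example_ptp_to_s_ns(u32 format, u32 major, u32 minor,
				u64 *secs, u32 *nsecs)
{
	u32 rem;

	switch (format) {
	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS:
		*secs = major;
		*nsecs = minor;
		break;
	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS:
		/* major counts 16 s units, minor counts 8 ns units */
		*secs = (u64)major * 16 +
			div_u64_rem((u64)minor * 8, NSEC_PER_SEC, &rem);
		*nsecs = rem;
		break;
	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION:
		/* minor counts 2^-27 s units: scale to nanoseconds */
		*secs = major;
		*nsecs = (u32)div_u64((u64)minor * NSEC_PER_SEC, 1 << 27);
		break;
	}
}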
*/ +#define MC_CMD_PTP_MANF_PPS_PERIOD 0xb +/* enum: PPS time event nS reading not sufficiently close to zero. */ +#define MC_CMD_PTP_MANF_PPS_NS 0xc +/* enum: PTP peripheral registers incorrect */ +#define MC_CMD_PTP_MANF_REGISTERS 0xd +/* enum: Failed to read time from PTP peripheral */ +#define MC_CMD_PTP_MANF_CLOCK_READ 0xe /* Presence of external oscillator */ #define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4 @@ -1198,6 +1341,62 @@ #define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1 #define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252 +/* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4 +/* Time format required/used by for this NIC. Applies to all PTP MCDI + * operations that pass times between the host and firmware. If this operation + * is not supported (older firmware) a format of seconds and nanoseconds should + * be assumed. + */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0 +/* enum: Times are in seconds and nanoseconds */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0 +/* enum: Major register has units of 16 second per tick, minor 8 ns per tick */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS 0x1 +/* enum: Major register has units of seconds, minor 2^-27s per tick */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2 + +/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 8 +/* Time format required/used by for this NIC. Applies to all PTP MCDI + * operations that pass times between the host and firmware. If this operation + * is not supported (older firmware) a format of seconds and nanoseconds should + * be assumed. + */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0 +/* enum: Times are in seconds and nanoseconds */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0 +/* enum: Major register has units of 16 second per tick, minor 8 ns per tick */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1 +/* enum: Major register has units of seconds, minor 2^-27s per tick */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2 +/* Minimum acceptable value for a corrected synchronization timeset. When + * comparing host and NIC clock times, the MC returns a set of samples that + * contain the host start and end time, the MC time when the host start was + * detected and the time the MC waited between reading the time and detecting + * the host end. The corrected sync window is the difference between the host + * end and start times minus the time that the MC waited for host end. 
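The corrected-window rule above reduces to a one-line helper. A minimal sketch, assuming the timeset fields have already been parsed into host-endian values (the parameter names here are illustrative, not the raw MCDI field macros):

/* Illustrative only: the corrected window for one SYNCHRONIZE sample, as
 * described above.  host_start and host_end are the host timestamps taken
 * around the MC clock read, and mc_wait_ns is the time the MC reports having
 * waited after reading its clock.  A sample whose window is smaller than the
 * SYNC_WINDOW_MIN attribute (defined just below) should be discarded.
 */
static u32 example_corrected_sync_window(u32 host_start, u32 host_end,
					 u32 mc_wait_ns)
{
	return host_end - host_start - mc_wait_ns;
}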
+ */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4 + +/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16 +/* Uncorrected error on transmit timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0 +/* Uncorrected error on receive timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4 +/* Uncorrected error on PPS output in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8 +/* Uncorrected error on PPS input in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12 + +/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */ +#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4 +/* Results of testing */ +#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */ + /***********************************/ /* MC_CMD_CSR_READ32 @@ -1923,6 +2122,8 @@ #define MC_CMD_MEDIA_SFP_PLUS 0x5 /* enum: 10GBaseT. */ #define MC_CMD_MEDIA_BASE_T 0x6 +/* enum: QSFP+. */ +#define MC_CMD_MEDIA_QSFP_PLUS 0x7 #define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48 /* enum: Native clause 22 */ #define MC_CMD_MMD_CLAUSE22 0x0 @@ -2223,6 +2424,8 @@ #define MC_CMD_LOOPBACK_SD_FEP_WS 0x21 /* enum: KR Serdes Serial Wireside. */ #define MC_CMD_LOOPBACK_SD_FES_WS 0x22 +/* enum: Near side of AOE Siena side port */ +#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 /* Supported loopbacks. */ #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8 #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8 @@ -2286,6 +2489,10 @@ #define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1 #define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3 #define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1 /* This returns the negotiated flow control value. */ #define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20 /* enum: Flow control is off. 
*/ @@ -3175,7 +3382,7 @@ #define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0 /* MC_CMD_SENSOR_INFO_OUT msgresponse */ -#define MC_CMD_SENSOR_INFO_OUT_LENMIN 12 +#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4 #define MC_CMD_SENSOR_INFO_OUT_LENMAX 252 #define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num)) #define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0 @@ -3269,16 +3476,18 @@ #define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b /* enum: voltage between VSS08D and VSS08D at CSR (external ADC): mV */ #define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c +/* enum: Hotpoint temperature: degC */ +#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ #define MC_CMD_SENSOR_ENTRY_OFST 4 #define MC_CMD_SENSOR_ENTRY_LEN 8 #define MC_CMD_SENSOR_ENTRY_LO_OFST 4 #define MC_CMD_SENSOR_ENTRY_HI_OFST 8 -#define MC_CMD_SENSOR_ENTRY_MINNUM 1 +#define MC_CMD_SENSOR_ENTRY_MINNUM 0 #define MC_CMD_SENSOR_ENTRY_MAXNUM 31 /* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */ -#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 12 +#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4 #define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252 #define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num)) #define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0 @@ -3291,7 +3500,7 @@ /* MC_CMD_SENSOR_ENTRY_LEN 8 */ /* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */ /* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */ -/* MC_CMD_SENSOR_ENTRY_MINNUM 1 */ +/* MC_CMD_SENSOR_ENTRY_MINNUM 0 */ /* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */ /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */ @@ -3864,6 +4073,18 @@ #define NVRAM_PARTITION_TYPE_ID_LBN 0 #define NVRAM_PARTITION_TYPE_ID_WIDTH 16 +/* LICENSED_APP_ID structuredef */ +#define LICENSED_APP_ID_LEN 4 +#define LICENSED_APP_ID_ID_OFST 0 +/* enum: OpenOnload */ +#define LICENSED_APP_ID_ONLOAD 0x1 +/* enum: PTP timestamping */ +#define LICENSED_APP_ID_PTP 0x2 +/* enum: SolarCapture Pro */ +#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4 +#define LICENSED_APP_ID_ID_LBN 0 +#define LICENSED_APP_ID_ID_WIDTH 32 + /***********************************/ /* MC_CMD_READ_REGS @@ -4021,6 +4242,8 @@ #define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1 #define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8 #define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20 /* The port ID associated with the v-adaptor which should contain this DMAQ. 
*/ @@ -4179,6 +4402,9 @@ #define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16 #define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */ +/* MC_CMD_PROXY_CMD_OUT msgresponse */ +#define MC_CMD_PROXY_CMD_OUT_LEN 0 + /***********************************/ /* MC_CMD_ALLOC_BUFTBL_CHUNK @@ -4213,7 +4439,7 @@ /* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */ #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20 -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 252 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268 #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num)) #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0 /* ID */ @@ -4226,7 +4452,7 @@ #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12 #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16 #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1 -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 30 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32 /* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */ #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0 @@ -6800,6 +7026,30 @@ /***********************************/ +/* MC_CMD_CAP_BLK_READ + * Read multiple 64bit words from capture block memory + */ +#define MC_CMD_CAP_BLK_READ 0xe7 + +/* MC_CMD_CAP_BLK_READ_IN msgrequest */ +#define MC_CMD_CAP_BLK_READ_IN_LEN 12 +#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0 +#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4 +#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8 + +/* MC_CMD_CAP_BLK_READ_OUT msgresponse */ +#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8 +#define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248 +#define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num)) +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31 + + +/***********************************/ /* MC_CMD_DUMP_DO * Take a dump of the DUT state */ @@ -6826,6 +7076,10 @@ #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20 #define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12 +/* enum: The uart port this command was received over (if using a uart + * transport) + */ +#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */ @@ -6942,39 +7196,68 @@ /***********************************/ -/* MC_CMD_START_KR_EYE_PLOT - * Start KR Serdes Eye diagram plot on a given lane. Lane must have valid - * signal. - */ -#define MC_CMD_START_KR_EYE_PLOT 0xee - -/* MC_CMD_START_KR_EYE_PLOT_IN msgrequest */ -#define MC_CMD_START_KR_EYE_PLOT_IN_LEN 4 -#define MC_CMD_START_KR_EYE_PLOT_IN_LANE_OFST 0 - -/* MC_CMD_START_KR_EYE_PLOT_OUT msgresponse */ -#define MC_CMD_START_KR_EYE_PLOT_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_POLL_KR_EYE_PLOT - * Poll KR Serdes Eye diagram plot. Returns one row of BER data. The caller - * should call this command repeatedly after starting eye plot, until no more - * data is returned. 
- */ -#define MC_CMD_POLL_KR_EYE_PLOT 0xef - -/* MC_CMD_POLL_KR_EYE_PLOT_IN msgrequest */ -#define MC_CMD_POLL_KR_EYE_PLOT_IN_LEN 0 - -/* MC_CMD_POLL_KR_EYE_PLOT_OUT msgresponse */ -#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMIN 0 -#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMAX 252 -#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LEN(num) (0+2*(num)) -#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_OFST 0 -#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_LEN 2 -#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MINNUM 0 -#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MAXNUM 126 +/* MC_CMD_UART_SEND_DATA + * Send checksummed[sic] block of data over the uart. Response is a placeholder + * should we wish to make this reliable; currently requests are fire-and- + * forget. + */ +#define MC_CMD_UART_SEND_DATA 0xee + +/* MC_CMD_UART_SEND_DATA_OUT msgrequest */ +#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16 +#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252 +#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num)) +/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */ +#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0 +/* Offset at which to write the data */ +#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4 +/* Length of data */ +#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8 +/* Reserved for future use */ +#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12 +#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16 +#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1 +#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0 +#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236 + +/* MC_CMD_UART_SEND_DATA_IN msgresponse */ +#define MC_CMD_UART_SEND_DATA_IN_LEN 0 + + +/***********************************/ +/* MC_CMD_UART_RECV_DATA + * Request checksummed[sic] block of data over the uart. Only a placeholder, + * subject to change and not currently implemented. + */ +#define MC_CMD_UART_RECV_DATA 0xef + +/* MC_CMD_UART_RECV_DATA_OUT msgrequest */ +#define MC_CMD_UART_RECV_DATA_OUT_LEN 16 +/* CRC32 over OFFSET, LENGTH, RESERVED */ +#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0 +/* Offset from which to read the data */ +#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4 +/* Length of data */ +#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8 +/* Reserved for future use */ +#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12 + +/* MC_CMD_UART_RECV_DATA_IN msgresponse */ +#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16 +#define MC_CMD_UART_RECV_DATA_IN_LENMAX 252 +#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num)) +/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */ +#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0 +/* Offset at which to write the data */ +#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4 +/* Length of data */ +#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8 +/* Reserved for future use */ +#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12 +#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16 +#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1 +#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0 +#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236 /***********************************/ @@ -7026,6 +7309,15 @@ #define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3 /* enum: Force KR Serdes reset / recalibration */ #define MC_CMD_KR_TUNE_IN_RECAL 0x4 +/* enum: Start KR Serdes Eye diagram plot on a given lane. Lane must have valid + * signal. + */ +#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5 +/* enum: Poll KR Serdes Eye diagram plot. Returns one row of BER data. The + * caller should call this command repeatedly after starting eye plot, until no + * more data is returned. 
+ */ +#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6 /* Align the arguments to 32 bits */ #define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1 #define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3 @@ -7123,6 +7415,91 @@ /* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */ #define MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0 +/* MC_CMD_KR_TUNE_TXEQ_GET_IN msgrequest */ +#define MC_CMD_KR_TUNE_TXEQ_GET_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_LEN 3 + +/* MC_CMD_KR_TUNE_TXEQ_GET_OUT msgresponse */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMIN 4 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX 252 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num)) +/* TXEQ Parameter */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_OFST 0 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LEN 4 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8 +/* enum: TX Amplitude */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0 +/* enum: De-Emphasis Tap1 Magnitude (0-7) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1 +/* enum: De-Emphasis Tap1 Fine */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2 +/* enum: De-Emphasis Tap2 Magnitude (0-6) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3 +/* enum: De-Emphasis Tap2 Fine */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4 +/* enum: Pre-Emphasis Magnitude */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5 +/* enum: Pre-Emphasis Fine */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6 +/* enum: TX Slew Rate Coarse control */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7 +/* enum: TX Slew Rate Fine control */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_LBN 24 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_WIDTH 8 + +/* MC_CMD_KR_TUNE_TXEQ_SET_IN msgrequest */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMIN 8 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX 252 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LEN(num) (4+4*(num)) +/* Requested operation */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_LEN 3 +/* TXEQ Parameter */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_OFST 4 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LEN 4 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MINNUM 1 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM 62 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_LBN 0 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_WIDTH 8 +/* Enum values, see field(s): */ 
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_ID */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_LBN 8 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_WIDTH 3 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_LANE */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_LBN 11 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_WIDTH 5 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_LBN 16 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_LBN 24 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_WIDTH 8 + +/* MC_CMD_KR_TUNE_TXEQ_SET_OUT msgresponse */ +#define MC_CMD_KR_TUNE_TXEQ_SET_OUT_LEN 0 + /* MC_CMD_KR_TUNE_RECAL_IN msgrequest */ #define MC_CMD_KR_TUNE_RECAL_IN_LEN 4 /* Requested operation */ @@ -7135,6 +7512,37 @@ /* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */ #define MC_CMD_KR_TUNE_RECAL_OUT_LEN 0 +/* MC_CMD_KR_TUNE_START_EYE_PLOT_IN msgrequest */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LEN 8 +/* Requested operation */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4 + +/* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0 + +/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN msgrequest */ +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3 + +/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT msgresponse */ +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num)) +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126 + /***********************************/ /* MC_CMD_PCIE_TUNE @@ -7157,6 +7565,13 @@ #define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2 /* enum: Override TX Driver settings */ #define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3 +/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */ +#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5 +/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The + * caller should call this command repeatedly after starting eye plot, until no + * more data is returned. 
+ */ +#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6 /* Align the arguments to 32 bits */ #define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1 #define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3 @@ -7258,6 +7673,37 @@ #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8 +/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */ +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8 +/* Requested operation */ +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0 +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1 +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3 +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4 + +/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */ +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0 + +/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */ +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0 +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1 +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3 + +/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */ +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0 +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252 +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num)) +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0 +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2 +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0 +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126 + /***********************************/ /* MC_CMD_LICENSING @@ -7310,5 +7756,152 @@ */ #define MC_CMD_MC2MC_PROXY 0xf4 +/* MC_CMD_MC2MC_PROXY_IN msgrequest */ +#define MC_CMD_MC2MC_PROXY_IN_LEN 0 + +/* MC_CMD_MC2MC_PROXY_OUT msgresponse */ +#define MC_CMD_MC2MC_PROXY_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_LICENSED_APP_STATE + * Query the state of an individual licensed application. (Note that the actual + * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation + * or a reboot of the MC.) + */ +#define MC_CMD_GET_LICENSED_APP_STATE 0xf5 + +/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */ +#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4 +/* application ID to query (LICENSED_APP_ID_xxx) */ +#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0 + +/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */ +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4 +/* state of this application */ +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0 +/* enum: no (or invalid) license is present for the application */ +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0 +/* enum: a valid license is present for the application */ +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1 + + +/***********************************/ +/* MC_CMD_LICENSED_APP_OP + * Perform an action for an individual licensed application. 
+ */ +#define MC_CMD_LICENSED_APP_OP 0xf6 + +/* MC_CMD_LICENSED_APP_OP_IN msgrequest */ +#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8 +#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252 +#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num)) +/* application ID */ +#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0 +/* the type of operation requested */ +#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4 +/* enum: validate application */ +#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0 +/* arguments specific to this particular operation */ +#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8 +#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4 +#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0 +#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61 + +/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */ +#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0 +#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252 +#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num)) +/* result specific to this particular operation */ +#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0 +#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4 +#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0 +#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63 + +/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72 +/* application ID */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0 +/* the type of operation requested */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4 +/* validation challenge */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8 +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64 + +/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68 +/* feature expiry (time_t) */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0 +/* validation response */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4 +#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64 + + +/***********************************/ +/* MC_CMD_SET_PORT_SNIFF_CONFIG + * Configure port sniffing for the physical port associated with the calling + * function. Only a privileged function may change the port sniffing + * configuration. A copy of all traffic delivered to the host (non-promiscuous + * mode) or all traffic arriving at the port (promiscuous mode) may be + * delivered to a specific queue, or a set of queues with RSS. + */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7 + +/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16 +/* configuration flags */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1 +/* receive queue handle (for RSS mode, this is the base queue) */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4 +/* receive mode */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8 +/* enum: receive to just the specified queue */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1 +/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note + * that these handles should be considered opaque to the host, although a value + * of 0xFFFFFFFF is guaranteed never to be a valid handle. 
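+ * (Editorial illustration, not part of this interface definition: a
+ * privileged driver function might build the request with the driver's
+ * existing MCDI helpers, roughly as
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN);
+ *	MCDI_POPULATE_DWORD_2(inbuf, SET_PORT_SNIFF_CONFIG_IN_FLAGS,
+ *			      SET_PORT_SNIFF_CONFIG_IN_ENABLE, 1,
+ *			      SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS, 0);
+ *	MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE, rxq);
+ *	MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_MODE,
+ *		       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_SET_PORT_SNIFF_CONFIG, inbuf,
+ *			  sizeof(inbuf), NULL, 0, NULL);
+ *
+ * where rxq is a hypothetical receive queue handle; for RX_MODE_RSS the
+ * RX_CONTEXT field below would be filled in as well.)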
+ */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12 + +/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_PORT_SNIFF_CONFIG + * Obtain the current port sniffing configuration for the physical port + * associated with the calling function. Only a privileged function may read + * the configuration. + */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8 + +/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0 + +/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16 +/* configuration flags */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1 +/* receiving queue handle (for RSS mode, this is the base queue) */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4 +/* receive mode */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8 +/* enum: receiving to just the specified queue */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0 +/* enum: receiving to multiple queues using RSS context */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1 +/* RSS context (for RX_MODE_RSS) */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12 + #endif /* MCDI_PCOL_H */ diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index 7b6be61d549f..91d23252f8fa 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -90,13 +90,6 @@ static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities, rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf), NULL, 0, NULL); - if (rc) - goto fail; - - return 0; - -fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } @@ -143,17 +136,13 @@ static int efx_mcdi_mdio_read(struct net_device *net_dev, rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); if (rc) - goto fail; + return rc; if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) != MC_CMD_MDIO_STATUS_GOOD) return -EIO; return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE); - -fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); - return rc; } static int efx_mcdi_mdio_write(struct net_device *net_dev, @@ -174,17 +163,13 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev, rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); if (rc) - goto fail; + return rc; if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) != MC_CMD_MDIO_STATUS_GOOD) return -EIO; return 0; - -fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); - return rc; } static u32 mcdi_to_ethtool_cap(u32 media, u32 cap) @@ -487,17 +472,14 @@ static bool efx_mcdi_phy_poll(struct efx_nic *efx) rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, outbuf, sizeof(outbuf), NULL); - if (rc) { - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", - __func__, rc); + if (rc) efx->link_state.up = false; - } else { + else efx_mcdi_phy_decode_link( efx, &efx->link_state, MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED), MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS), MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL)); - } return !efx_link_state_equal(&efx->link_state, &old_state); } @@ 
-531,11 +513,8 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, outbuf, sizeof(outbuf), NULL); - if (rc) { - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", - __func__, rc); + if (rc) return; - } ecmd->lp_advertising = mcdi_to_ethtool_cap(phy_cfg->media, MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP)); @@ -918,21 +897,29 @@ bool efx_mcdi_mac_check_fault(struct efx_nic *efx) rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, outbuf, sizeof(outbuf), &outlength); - if (rc) { - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", - __func__, rc); + if (rc) return true; - } return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0; } -static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, - u32 dma_len, int enable, int clear) +enum efx_stats_action { + EFX_STATS_ENABLE, + EFX_STATS_DISABLE, + EFX_STATS_PULL, +}; + +static int efx_mcdi_mac_stats(struct efx_nic *efx, + enum efx_stats_action action, int clear) { MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); int rc; - int period = enable ? 1000 : 0; + int change = action == EFX_STATS_PULL ? 0 : 1; + int enable = action == EFX_STATS_ENABLE ? 1 : 0; + int period = action == EFX_STATS_ENABLE ? 1000 : 0; + dma_addr_t dma_addr = efx->stats_buffer.dma_addr; + u32 dma_len = action != EFX_STATS_DISABLE ? + MC_CMD_MAC_NSTATS * sizeof(u64) : 0; BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0); @@ -940,8 +927,8 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD, MAC_STATS_IN_DMA, !!enable, MAC_STATS_IN_CLEAR, clear, - MAC_STATS_IN_PERIODIC_CHANGE, 1, - MAC_STATS_IN_PERIODIC_ENABLE, !!enable, + MAC_STATS_IN_PERIODIC_CHANGE, change, + MAC_STATS_IN_PERIODIC_ENABLE, enable, MAC_STATS_IN_PERIODIC_CLEAR, 0, MAC_STATS_IN_PERIODIC_NOEVENT, 1, MAC_STATS_IN_PERIOD_MS, period); @@ -949,14 +936,6 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), NULL, 0, NULL); - if (rc) - goto fail; - - return 0; - -fail: - netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n", - __func__, enable ? 
"enable" : "disable", rc); return rc; } @@ -966,13 +945,29 @@ void efx_mcdi_mac_start_stats(struct efx_nic *efx) dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID; - efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, - MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0); + efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0); } void efx_mcdi_mac_stop_stats(struct efx_nic *efx) { - efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0); + efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 0); +} + +#define EFX_MAC_STATS_WAIT_US 100 +#define EFX_MAC_STATS_WAIT_ATTEMPTS 10 + +void efx_mcdi_mac_pull_stats(struct efx_nic *efx) +{ + __le64 *dma_stats = efx->stats_buffer.addr; + int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS; + + dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID; + efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0); + + while (dma_stats[MC_CMD_MAC_GENERATION_END] == + EFX_MC_STATS_GENERATION_INVALID && + attempts-- != 0) + udelay(EFX_MAC_STATS_WAIT_US); } int efx_mcdi_port_probe(struct efx_nic *efx) @@ -1003,7 +998,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx) efx->stats_buffer.addr, (u64)virt_to_phys(efx->stats_buffer.addr)); - efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1); + efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 1); return 0; } diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 542a0d252ae0..af2b8c59a903 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -91,6 +91,7 @@ /* Forward declare Precision Time Protocol (PTP) support structure. */ struct efx_ptp_data; +struct hwtstamp_config; struct efx_self_tests; @@ -287,12 +288,9 @@ struct efx_rx_buffer { * Used to facilitate sharing dma mappings between recycled rx buffers * and those passed up to the kernel. * - * @refcnt: Number of struct efx_rx_buffer's referencing this page. - * When refcnt falls to zero, the page is unmapped for dma * @dma_addr: The dma address of this page. */ struct efx_rx_page_state { - unsigned refcnt; dma_addr_t dma_addr; unsigned int __pad[0] ____cacheline_aligned; @@ -362,10 +360,11 @@ struct efx_rx_queue { unsigned int slow_fill_count; }; -enum efx_rx_alloc_method { - RX_ALLOC_METHOD_AUTO = 0, - RX_ALLOC_METHOD_SKB = 1, - RX_ALLOC_METHOD_PAGE = 2, +enum efx_sync_events_state { + SYNC_EVENTS_DISABLED = 0, + SYNC_EVENTS_QUIESCENT, + SYNC_EVENTS_REQUESTED, + SYNC_EVENTS_VALID, }; /** @@ -407,6 +406,9 @@ enum efx_rx_alloc_method { * by __efx_rx_packet(), if @rx_pkt_n_frags != 0 * @rx_queue: RX queue for this channel * @tx_queue: TX queues for this channel + * @sync_events_state: Current state of sync events on this channel + * @sync_timestamp_major: Major part of the last ptp sync event + * @sync_timestamp_minor: Minor part of the last ptp sync event */ struct efx_channel { struct efx_nic *efx; @@ -445,6 +447,10 @@ struct efx_channel { struct efx_rx_queue rx_queue; struct efx_tx_queue tx_queue[EFX_TXQ_TYPES]; + + enum efx_sync_events_state sync_events_state; + u32 sync_timestamp_major; + u32 sync_timestamp_minor; }; /** @@ -520,15 +526,6 @@ enum nic_state { STATE_RECOVERY = 3, /* device recovering from PCI error */ }; -/* - * Alignment of the skb->head which wraps a page-allocated RX buffer - * - * The skb allocated to wrap an rx_buffer can have this alignment. Since - * the data is memcpy'd from the rx_buf, it does not need to be equal to - * NET_IP_ALIGN. 
- */ -#define EFX_PAGE_SKB_ALIGN 2 - /* Forward declaration */ struct efx_nic; @@ -651,6 +648,13 @@ struct vfdi_status; * struct efx_nic - an Efx NIC * @name: Device name (net device name or bus id before net device registered) * @pci_dev: The PCI device + * @node: List node for maintaning primary/secondary function lists + * @primary: &struct efx_nic instance for the primary function of this + * controller. May be the same structure, and may be %NULL if no + * primary function is bound. Serialised by rtnl_lock. + * @secondary_list: List of &struct efx_nic instances for the secondary PCI + * functions of the controller, if this is for the primary function. + * Serialised by rtnl_lock. * @type: Controller type attributes * @legacy_irq: IRQ number * @workqueue: Workqueue for port reconfigures and the HW monitor. @@ -694,6 +698,8 @@ struct vfdi_status; * (valid only if @rx_prefix_size != 0; always negative) * @rx_packet_len_offset: Offset of RX packet length from start of packet data * (valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative) + * @rx_packet_ts_offset: Offset of timestamp from start of packet data + * (valid only if channel->sync_timestamps_enabled; always negative) * @rx_hash_key: Toeplitz hash key for RSS * @rx_indir_table: Indirection table for RSS * @rx_scatter: Scatter mode enabled for receives @@ -763,6 +769,7 @@ struct vfdi_status; * @local_lock: Mutex protecting %local_addr_list and %local_page_list. * @peer_work: Work item to broadcast peer addresses to VMs. * @ptp_data: PTP state data + * @vpd_sn: Serial number read from VPD * @monitor_work: Hardware monitor workitem * @biu_lock: BIU (bus interface unit) lock * @last_irq_cpu: Last CPU to handle a possible test interrupt. This @@ -777,6 +784,9 @@ struct efx_nic { /* The following fields should be written very rarely */ char name[IFNAMSIZ]; + struct list_head node; + struct efx_nic *primary; + struct list_head secondary_list; struct pci_dev *pci_dev; unsigned int port_num; const struct efx_nic_type *type; @@ -828,6 +838,7 @@ struct efx_nic { unsigned int rx_prefix_size; int rx_packet_hash_offset; int rx_packet_len_offset; + int rx_packet_ts_offset; u8 rx_hash_key[40]; u32 rx_indir_table[128]; bool rx_scatter; @@ -852,10 +863,14 @@ struct efx_nic { struct work_struct mac_work; bool port_enabled; + bool mc_bist_for_other_fn; bool port_initialized; struct net_device *net_dev; struct efx_buffer stats_buffer; + u64 rx_nodesc_drops_total; + u64 rx_nodesc_drops_while_down; + bool rx_nodesc_drops_prev_state; unsigned int phy_type; const struct efx_phy_operations *phy_op; @@ -907,6 +922,8 @@ struct efx_nic { struct efx_ptp_data *ptp_data; + char *vpd_sn; + /* The following fields may be written more often */ struct delayed_work monitor_work ____cacheline_aligned_in_smp; @@ -959,6 +976,7 @@ struct efx_mtd_partition { * @update_stats: Update statistics not provided by event handling. * Either argument may be %NULL. * @start_stats: Start the regular fetching of statistics + * @pull_stats: Pull stats from the NIC and wait until they arrive. 
* @stop_stats: Stop the regular fetching of statistics * @set_id_led: Set state of identifying LED or revert to automatic function * @push_irq_moderation: Apply interrupt moderation value @@ -997,7 +1015,7 @@ struct efx_mtd_partition { * @tx_init: Initialise TX queue on the NIC * @tx_remove: Free resources for TX queue * @tx_write: Write TX descriptors and doorbell - * @rx_push_indir_table: Write RSS indirection table to the NIC + * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC * @rx_probe: Allocate resources for RX queue * @rx_init: Initialise RX queue on the NIC * @rx_remove: Free resources for RX queue @@ -1017,7 +1035,8 @@ struct efx_mtd_partition { * @filter_insert: add or replace a filter * @filter_remove_safe: remove a filter by ID, carefully * @filter_get_safe: retrieve a filter by ID, carefully - * @filter_clear_rx: remove RX filters by priority + * @filter_clear_rx: Remove all RX filters whose priority is less than or + * equal to the given priority and is not %EFX_FILTER_PRI_AUTO * @filter_count_rx_used: Get the number of filters in use at a given priority * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1 * @filter_get_rx_ids: Get list of RX filters at a given priority @@ -1037,6 +1056,12 @@ struct efx_mtd_partition { * @mtd_sync: Wait for write-back to complete on MTD partition. This * also notifies the driver that a writer has finished using this * partition. + * @ptp_write_host_time: Send host time to MC as part of sync protocol + * @ptp_set_ts_sync_events: Enable or disable sync events for inline RX + * timestamping, possibly only temporarily for the purposes of a reset. + * @ptp_set_ts_config: Set hardware timestamp configuration. The flags + * and tx_type will already have been validated but this operation + * must validate and update rx_filter. 
* @revision: Hardware architecture revision * @txd_ptr_tbl_base: TX descriptor ring base address * @rxd_ptr_tbl_base: RX descriptor ring base address @@ -1046,6 +1071,7 @@ struct efx_mtd_partition { * @max_dma_mask: Maximum possible DMA mask * @rx_prefix_size: Size of RX prefix before packet data * @rx_hash_offset: Offset of RX flow hash within prefix + * @rx_ts_offset: Offset of timestamp within prefix * @rx_buffer_padding: Size of padding at end of RX packet * @can_rx_scatter: NIC is able to scatter packets to multiple buffers * @always_rx_scatter: NIC will always scatter packets to multiple buffers @@ -1055,6 +1081,7 @@ struct efx_mtd_partition { * @offload_features: net_device feature flags for protocol offload * features implemented in hardware * @mcdi_max_ver: Maximum MCDI version supported + * @hwtstamp_filters: Mask of hardware timestamp filter types supported */ struct efx_nic_type { unsigned int (*mem_map_size)(struct efx_nic *efx); @@ -1077,6 +1104,7 @@ struct efx_nic_type { size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats, struct rtnl_link_stats64 *core_stats); void (*start_stats)(struct efx_nic *efx); + void (*pull_stats)(struct efx_nic *efx); void (*stop_stats)(struct efx_nic *efx); void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode); void (*push_irq_moderation)(struct efx_channel *channel); @@ -1105,7 +1133,7 @@ struct efx_nic_type { void (*tx_init)(struct efx_tx_queue *tx_queue); void (*tx_remove)(struct efx_tx_queue *tx_queue); void (*tx_write)(struct efx_tx_queue *tx_queue); - void (*rx_push_indir_table)(struct efx_nic *efx); + void (*rx_push_rss_config)(struct efx_nic *efx); int (*rx_probe)(struct efx_rx_queue *rx_queue); void (*rx_init)(struct efx_rx_queue *rx_queue); void (*rx_remove)(struct efx_rx_queue *rx_queue); @@ -1130,8 +1158,8 @@ struct efx_nic_type { int (*filter_get_safe)(struct efx_nic *efx, enum efx_filter_priority priority, u32 filter_id, struct efx_filter_spec *); - void (*filter_clear_rx)(struct efx_nic *efx, - enum efx_filter_priority priority); + int (*filter_clear_rx)(struct efx_nic *efx, + enum efx_filter_priority priority); u32 (*filter_count_rx_used)(struct efx_nic *efx, enum efx_filter_priority priority); u32 (*filter_get_rx_id_limit)(struct efx_nic *efx); @@ -1155,6 +1183,9 @@ struct efx_nic_type { int (*mtd_sync)(struct mtd_info *mtd); #endif void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time); + int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp); + int (*ptp_set_ts_config)(struct efx_nic *efx, + struct hwtstamp_config *init); int revision; unsigned int txd_ptr_tbl_base; @@ -1165,6 +1196,7 @@ struct efx_nic_type { u64 max_dma_mask; unsigned int rx_prefix_size; unsigned int rx_hash_offset; + unsigned int rx_ts_offset; unsigned int rx_buffer_padding; bool can_rx_scatter; bool always_rx_scatter; @@ -1173,6 +1205,7 @@ struct efx_nic_type { netdev_features_t offload_features; int mcdi_max_ver; unsigned int max_rx_ip_filters; + u32 hwtstamp_filters; }; /************************************************************************** diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 9c90bf56090f..79226b19e3c4 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -519,3 +519,15 @@ void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count, } } } + +void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops) +{ + /* if down, or this is the first update after coming up */ + if (!(efx->net_dev->flags & IFF_UP) || 
!efx->rx_nodesc_drops_prev_state) + efx->rx_nodesc_drops_while_down += + *rx_nodesc_drops - efx->rx_nodesc_drops_total; + efx->rx_nodesc_drops_total = *rx_nodesc_drops; + efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP); + *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down; +} + diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 91c63ec79c5f..a001fae1a8d7 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -412,8 +412,8 @@ enum { EF10_STAT_rx_dp_q_disabled_packets, EF10_STAT_rx_dp_di_dropped_packets, EF10_STAT_rx_dp_streaming_packets, - EF10_STAT_rx_dp_emerg_fetch, - EF10_STAT_rx_dp_emerg_wait, + EF10_STAT_rx_dp_hlb_fetch, + EF10_STAT_rx_dp_hlb_wait, EF10_STAT_COUNT }; @@ -554,12 +554,29 @@ int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf, bool spoofchk); struct ethtool_ts_info; -void efx_ptp_probe(struct efx_nic *efx); -int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd); +int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel); +void efx_ptp_defer_probe_with_channel(struct efx_nic *efx); +void efx_ptp_remove(struct efx_nic *efx); +int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr); +int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr); void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info); bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); +int efx_ptp_get_mode(struct efx_nic *efx); +int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, + unsigned int new_mode); int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); +size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings); +size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats); +void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev); +void __efx_rx_skb_attach_timestamp(struct efx_channel *channel, + struct sk_buff *skb); +static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel, + struct sk_buff *skb) +{ + if (channel->sync_events_state == SYNC_EVENTS_VALID) + __efx_rx_skb_attach_timestamp(channel, skb); +} void efx_ptp_start_datapath(struct efx_nic *efx); void efx_ptp_stop_datapath(struct efx_nic *efx); @@ -678,8 +695,8 @@ int efx_farch_filter_remove_safe(struct efx_nic *efx, int efx_farch_filter_get_safe(struct efx_nic *efx, enum efx_filter_priority priority, u32 filter_id, struct efx_filter_spec *); -void efx_farch_filter_clear_rx(struct efx_nic *efx, - enum efx_filter_priority priority); +int efx_farch_filter_clear_rx(struct efx_nic *efx, + enum efx_filter_priority priority); u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, enum efx_filter_priority priority); u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx); @@ -747,10 +764,6 @@ int falcon_reset_xaui(struct efx_nic *efx); void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); void efx_farch_init_common(struct efx_nic *efx); void efx_ef10_handle_drain_event(struct efx_nic *efx); -static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx) -{ - efx->type->rx_push_indir_table(efx); -} void efx_farch_rx_push_indir_table(struct efx_nic *efx); int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, @@ -774,6 +787,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count, const unsigned long *mask, u64 *stats, const 
void *dma_buf, bool accumulate); +void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat); #define EFX_MAX_FLUSH_TIME 5000 diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 3dd39dcfe36b..eb75fbd11a01 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -62,7 +62,7 @@ #define SYNCHRONISATION_GRANULARITY_NS 200 /* Minimum permitted length of a (corrected) synchronisation time */ -#define MIN_SYNCHRONISATION_NS 120 +#define DEFAULT_MIN_SYNCHRONISATION_NS 120 /* Maximum permitted length of a (corrected) synchronisation time */ #define MAX_SYNCHRONISATION_NS 1000 @@ -195,26 +195,29 @@ struct efx_ptp_event_rx { /** * struct efx_ptp_timeset - Synchronisation between host and MC * @host_start: Host time immediately before hardware timestamp taken - * @seconds: Hardware timestamp, seconds - * @nanoseconds: Hardware timestamp, nanoseconds + * @major: Hardware timestamp, major + * @minor: Hardware timestamp, minor * @host_end: Host time immediately after hardware timestamp taken - * @waitns: Number of nanoseconds between hardware timestamp being read and + * @wait: Number of NIC clock ticks between hardware timestamp being read and * host end time being seen * @window: Difference of host_end and host_start * @valid: Whether this timeset is valid */ struct efx_ptp_timeset { u32 host_start; - u32 seconds; - u32 nanoseconds; + u32 major; + u32 minor; u32 host_end; - u32 waitns; + u32 wait; u32 window; /* Derived: end - start, allowing for wrap */ }; /** * struct efx_ptp_data - Precision Time Protocol (PTP) state - * @channel: The PTP channel + * @efx: The NIC context + * @channel: The PTP channel (Siena only) + * @rx_ts_inline: Flag for whether RX timestamps are inline (else they are + * separate events) * @rxq: Receive queue (awaiting timestamps) * @txq: Transmit queue * @evt_list: List of MC receive events awaiting packets @@ -231,41 +234,42 @@ struct efx_ptp_timeset { * @config: Current timestamp configuration * @enabled: PTP operation enabled * @mode: Mode in which PTP operating (PTP version) + * @time_format: Time format supported by this NIC + * @ns_to_nic_time: Function to convert from scalar nanoseconds to NIC time + * @nic_to_kernel_time: Function to convert from NIC to kernel time + * @min_synchronisation_ns: Minimum acceptable corrected sync window + * @ts_corrections.tx: Required driver correction of transmit timestamps + * @ts_corrections.rx: Required driver correction of receive timestamps + * @ts_corrections.pps_out: PPS output error (information only) + * @ts_corrections.pps_in: Required driver correction of PPS input timestamps * @evt_frags: Partly assembled PTP events * @evt_frag_idx: Current fragment number * @evt_code: Last event code * @start: Address at which MC indicates ready for synchronisation * @host_time_pps: Host time at last PPS - * @last_sync_ns: Last number of nanoseconds between readings when synchronising - * @base_sync_ns: Number of nanoseconds for last synchronisation. - * @base_sync_valid: Whether base_sync_time is valid. * @current_adjfreq: Current ppb adjustment. - * @phc_clock: Pointer to registered phc device + * @phc_clock: Pointer to registered phc device (if primary function) * @phc_clock_info: Registration structure for phc device * @pps_work: pps work task for handling pps events * @pps_workwq: pps work queue * @nic_ts_enabled: Flag indicating if NIC generated TS events are handled * @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids * allocations in main data path). 
- * @debug_ptp_dir: PTP debugfs directory - * @missed_rx_sync: Number of packets received without syncrhonisation. * @good_syncs: Number of successful synchronisations. - * @no_time_syncs: Number of synchronisations with no good times. - * @bad_sync_durations: Number of synchronisations with bad durations. + * @fast_syncs: Number of synchronisations requiring short delay * @bad_syncs: Number of failed synchronisations. - * @last_sync_time: Number of nanoseconds for last synchronisation. * @sync_timeouts: Number of synchronisation timeouts - * @fast_syncs: Number of synchronisations requiring short delay - * @min_sync_delta: Minimum time between event and synchronisation - * @max_sync_delta: Maximum time between event and synchronisation - * @average_sync_delta: Average time between event and synchronisation. - * Modified moving average. - * @last_sync_delta: Last time between event and synchronisation - * @mc_stats: Context value for MC statistics + * @no_time_syncs: Number of synchronisations with no good times. + * @invalid_sync_windows: Number of sync windows with bad durations. + * @undersize_sync_windows: Number of corrected sync windows that are too small + * @oversize_sync_windows: Number of corrected sync windows that are too large + * @rx_no_timestamp: Number of packets received without a timestamp. * @timeset: Last set of synchronisation statistics. */ struct efx_ptp_data { + struct efx_nic *efx; struct efx_channel *channel; + bool rx_ts_inline; struct sk_buff_head rxq; struct sk_buff_head txq; struct list_head evt_list; @@ -282,14 +286,22 @@ struct efx_ptp_data { struct hwtstamp_config config; bool enabled; unsigned int mode; + unsigned int time_format; + void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor); + ktime_t (*nic_to_kernel_time)(u32 nic_major, u32 nic_minor, + s32 correction); + unsigned int min_synchronisation_ns; + struct { + s32 tx; + s32 rx; + s32 pps_out; + s32 pps_in; + } ts_corrections; efx_qword_t evt_frags[MAX_EVENT_FRAGS]; int evt_frag_idx; int evt_code; struct efx_buffer start; struct pps_event_time host_time_pps; - unsigned last_sync_ns; - unsigned base_sync_ns; - bool base_sync_valid; s64 current_adjfreq; struct ptp_clock *phc_clock; struct ptp_clock_info phc_clock_info; @@ -297,6 +309,16 @@ struct efx_ptp_data { struct workqueue_struct *pps_workwq; bool nic_ts_enabled; MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX); + + unsigned int good_syncs; + unsigned int fast_syncs; + unsigned int bad_syncs; + unsigned int sync_timeouts; + unsigned int no_time_syncs; + unsigned int invalid_sync_windows; + unsigned int undersize_sync_windows; + unsigned int oversize_sync_windows; + unsigned int rx_no_timestamp; struct efx_ptp_timeset timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM]; }; @@ -309,19 +331,263 @@ static int efx_phc_settime(struct ptp_clock_info *ptp, static int efx_phc_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *request, int on); +#define PTP_SW_STAT(ext_name, field_name) \ + { #ext_name, 0, offsetof(struct efx_ptp_data, field_name) } +#define PTP_MC_STAT(ext_name, mcdi_name) \ + { #ext_name, 32, MC_CMD_PTP_OUT_STATUS_STATS_ ## mcdi_name ## _OFST } +static const struct efx_hw_stat_desc efx_ptp_stat_desc[] = { + PTP_SW_STAT(ptp_good_syncs, good_syncs), + PTP_SW_STAT(ptp_fast_syncs, fast_syncs), + PTP_SW_STAT(ptp_bad_syncs, bad_syncs), + PTP_SW_STAT(ptp_sync_timeouts, sync_timeouts), + PTP_SW_STAT(ptp_no_time_syncs, no_time_syncs), + PTP_SW_STAT(ptp_invalid_sync_windows, invalid_sync_windows), + 
PTP_SW_STAT(ptp_undersize_sync_windows, undersize_sync_windows), + PTP_SW_STAT(ptp_oversize_sync_windows, oversize_sync_windows), + PTP_SW_STAT(ptp_rx_no_timestamp, rx_no_timestamp), + PTP_MC_STAT(ptp_tx_timestamp_packets, TX), + PTP_MC_STAT(ptp_rx_timestamp_packets, RX), + PTP_MC_STAT(ptp_timestamp_packets, TS), + PTP_MC_STAT(ptp_filter_matches, FM), + PTP_MC_STAT(ptp_non_filter_matches, NFM), +}; +#define PTP_STAT_COUNT ARRAY_SIZE(efx_ptp_stat_desc) +static const unsigned long efx_ptp_stat_mask[] = { + [0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL, +}; + +size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings) +{ + if (!efx->ptp_data) + return 0; + + return efx_nic_describe_stats(efx_ptp_stat_desc, PTP_STAT_COUNT, + efx_ptp_stat_mask, strings); +} + +size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_STATUS_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_STATUS_LEN); + size_t i; + int rc; + + if (!efx->ptp_data) + return 0; + + /* Copy software statistics */ + for (i = 0; i < PTP_STAT_COUNT; i++) { + if (efx_ptp_stat_desc[i].dma_width) + continue; + stats[i] = *(unsigned int *)((char *)efx->ptp_data + + efx_ptp_stat_desc[i].offset); + } + + /* Fetch MC statistics. We *must* fill in all statistics or + * risk leaking kernel memory to userland, so if the MCDI + * request fails we pretend we got zeroes. + */ + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_STATUS); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + if (rc) { + netif_err(efx, hw, efx->net_dev, + "MC_CMD_PTP_OP_STATUS failed (%d)\n", rc); + memset(outbuf, 0, sizeof(outbuf)); + } + efx_nic_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT, + efx_ptp_stat_mask, + stats, _MCDI_PTR(outbuf, 0), false); + + return PTP_STAT_COUNT; +} + +/* For Siena platforms NIC time is s and ns */ +static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor) +{ + struct timespec ts = ns_to_timespec(ns); + *nic_major = ts.tv_sec; + *nic_minor = ts.tv_nsec; +} + +static ktime_t efx_ptp_s_ns_to_ktime_correction(u32 nic_major, u32 nic_minor, + s32 correction) +{ + ktime_t kt = ktime_set(nic_major, nic_minor); + if (correction >= 0) + kt = ktime_add_ns(kt, (u64)correction); + else + kt = ktime_sub_ns(kt, (u64)-correction); + return kt; +} + +/* To convert from s27 format to ns we multiply then divide by a power of 2. + * For the conversion from ns to s27, the operation is also converted to a + * multiply and shift. + */ +#define S27_TO_NS_SHIFT (27) +#define NS_TO_S27_MULT (((1ULL << 63) + NSEC_PER_SEC / 2) / NSEC_PER_SEC) +#define NS_TO_S27_SHIFT (63 - S27_TO_NS_SHIFT) +#define S27_MINOR_MAX (1 << S27_TO_NS_SHIFT) + +/* For Huntington platforms NIC time is in seconds and fractions of a second + * where the minor register only uses 27 bits in units of 2^-27s. + */ +static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor) +{ + struct timespec ts = ns_to_timespec(ns); + u32 maj = ts.tv_sec; + u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT + + (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT); + + /* The conversion can result in the minor value exceeding the maximum. + * In this case, round up to the next second. 
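+	 * (Editorial worked example, illustrative only: tv_nsec = 500000000
+	 * gives (500000000 * NS_TO_S27_MULT) >> NS_TO_S27_SHIFT = 67108864,
+	 * i.e. 2^26, exactly half of S27_MINOR_MAX; a tv_nsec just below
+	 * NSEC_PER_SEC is mapped by this conversion to S27_MINOR_MAX itself,
+	 * which is the case the carry below handles.)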
+ */ + if (min >= S27_MINOR_MAX) { + min -= S27_MINOR_MAX; + maj++; + } + + *nic_major = maj; + *nic_minor = min; +} + +static inline ktime_t efx_ptp_s27_to_ktime(u32 nic_major, u32 nic_minor) +{ + u32 ns = (u32)(((u64)nic_minor * NSEC_PER_SEC + + (1ULL << (S27_TO_NS_SHIFT - 1))) >> S27_TO_NS_SHIFT); + return ktime_set(nic_major, ns); +} + +static ktime_t efx_ptp_s27_to_ktime_correction(u32 nic_major, u32 nic_minor, + s32 correction) +{ + /* Apply the correction and deal with carry */ + nic_minor += correction; + if ((s32)nic_minor < 0) { + nic_minor += S27_MINOR_MAX; + nic_major--; + } else if (nic_minor >= S27_MINOR_MAX) { + nic_minor -= S27_MINOR_MAX; + nic_major++; + } + + return efx_ptp_s27_to_ktime(nic_major, nic_minor); +} + +/* Get PTP attributes and set up time conversions */ +static int efx_ptp_get_attributes(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN); + struct efx_ptp_data *ptp = efx->ptp_data; + int rc; + u32 fmt; + size_t out_len; + + /* Get the PTP attributes. If the NIC doesn't support the operation we + * use the default format for compatibility with older NICs i.e. + * seconds and nanoseconds. + */ + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_GET_ATTRIBUTES); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &out_len); + if (rc == 0) + fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT); + else if (rc == -EINVAL) + fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS; + else + return rc; + + if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION) { + ptp->ns_to_nic_time = efx_ptp_ns_to_s27; + ptp->nic_to_kernel_time = efx_ptp_s27_to_ktime_correction; + } else if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS) { + ptp->ns_to_nic_time = efx_ptp_ns_to_s_ns; + ptp->nic_to_kernel_time = efx_ptp_s_ns_to_ktime_correction; + } else { + return -ERANGE; + } + + ptp->time_format = fmt; + + /* MC_CMD_PTP_OP_GET_ATTRIBUTES is an extended version of an older + * operation MC_CMD_PTP_OP_GET_TIME_FORMAT that also returns a value + * to use for the minimum acceptable corrected synchronization window. + * If we have the extra information store it. For older firmware that + * does not implement the extended command use the default value. + */ + if (rc == 0 && out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN) + ptp->min_synchronisation_ns = + MCDI_DWORD(outbuf, + PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN); + else + ptp->min_synchronisation_ns = DEFAULT_MIN_SYNCHRONISATION_NS; + + return 0; +} + +/* Get PTP timestamp corrections */ +static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN); + int rc; + + /* Get the timestamp corrections from the NIC. If this operation is + * not supported (older NICs) then no correction is required. 
+ */ + MCDI_SET_DWORD(inbuf, PTP_IN_OP, + MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + if (rc == 0) { + efx->ptp_data->ts_corrections.tx = MCDI_DWORD(outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT); + efx->ptp_data->ts_corrections.rx = MCDI_DWORD(outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE); + efx->ptp_data->ts_corrections.pps_out = MCDI_DWORD(outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT); + efx->ptp_data->ts_corrections.pps_in = MCDI_DWORD(outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN); + } else if (rc == -EINVAL) { + efx->ptp_data->ts_corrections.tx = 0; + efx->ptp_data->ts_corrections.rx = 0; + efx->ptp_data->ts_corrections.pps_out = 0; + efx->ptp_data->ts_corrections.pps_in = 0; + } else { + return rc; + } + + return 0; +} + /* Enable MCDI PTP support. */ static int efx_ptp_enable(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN); + MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0); + int rc; MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE); MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE, - efx->ptp_data->channel->channel); + efx->ptp_data->channel ? + efx->ptp_data->channel->channel : 0); MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode); - return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), - NULL, 0, NULL); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + rc = (rc == -EALREADY) ? 0 : rc; + if (rc) + efx_mcdi_display_error(efx, MC_CMD_PTP, + MC_CMD_PTP_IN_ENABLE_LEN, + outbuf, sizeof(outbuf), rc); + return rc; } /* Disable MCDI PTP support. @@ -332,11 +598,19 @@ static int efx_ptp_enable(struct efx_nic *efx) static int efx_ptp_disable(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN); + MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0); + int rc; MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE); MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); - return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), - NULL, 0, NULL); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + rc = (rc == -EALREADY) ? 
0 : rc; + if (rc) + efx_mcdi_display_error(efx, MC_CMD_PTP, + MC_CMD_PTP_IN_DISABLE_LEN, + outbuf, sizeof(outbuf), rc); + return rc; } static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q) @@ -404,11 +678,10 @@ static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data), unsigned start_ns, end_ns; timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART); - timeset->seconds = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_SECONDS); - timeset->nanoseconds = MCDI_DWORD(data, - PTP_OUT_SYNCHRONIZE_NANOSECONDS); + timeset->major = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MAJOR); + timeset->minor = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MINOR); timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND), - timeset->waitns = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS); + timeset->wait = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS); /* Ignore seconds */ start_ns = timeset->host_start & MC_NANOSECOND_MASK; @@ -437,62 +710,73 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf), MCDI_VAR_ARRAY_LEN(response_length, PTP_OUT_SYNCHRONIZE_TIMESET); unsigned i; - unsigned total; unsigned ngood = 0; unsigned last_good = 0; struct efx_ptp_data *ptp = efx->ptp_data; u32 last_sec; u32 start_sec; struct timespec delta; + ktime_t mc_time; if (number_readings == 0) return -EAGAIN; - /* Read the set of results and increment stats for any results that - * appera to be erroneous. + /* Read the set of results and find the last good host-MC + * synchronization result. The MC times when it finishes reading the + * host time so the corrected window time should be fairly constant + * for a given platform. Increment stats for any results that appear + * to be erroneous. */ for (i = 0; i < number_readings; i++) { + s32 window, corrected; + struct timespec wait; + efx_ptp_read_timeset( MCDI_ARRAY_STRUCT_PTR(synch_buf, PTP_OUT_SYNCHRONIZE_TIMESET, i), &ptp->timeset[i]); - } - /* Find the last good host-MC synchronization result. The MC times - * when it finishes reading the host time so the corrected window time - * should be fairly constant for a given platform. - */ - total = 0; - for (i = 0; i < number_readings; i++) - if (ptp->timeset[i].window > ptp->timeset[i].waitns) { - unsigned win; - - win = ptp->timeset[i].window - ptp->timeset[i].waitns; - if (win >= MIN_SYNCHRONISATION_NS && - win < MAX_SYNCHRONISATION_NS) { - total += ptp->timeset[i].window; - ngood++; - last_good = i; - } + wait = ktime_to_timespec( + ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0)); + window = ptp->timeset[i].window; + corrected = window - wait.tv_nsec; + + /* We expect the uncorrected synchronization window to be at + * least as large as the interval between host start and end + * times. If it is smaller than this then this is mostly likely + * to be a consequence of the host's time being adjusted. + * Check that the corrected sync window is in a reasonable + * range. If it is out of range it is likely to be because an + * interrupt or other delay occurred between reading the system + * time and writing it to MC memory. 
+		 */
+		if (window < SYNCHRONISATION_GRANULARITY_NS) {
+			++ptp->invalid_sync_windows;
+		} else if (corrected >= MAX_SYNCHRONISATION_NS) {
+			++ptp->oversize_sync_windows;
+		} else if (corrected < ptp->min_synchronisation_ns) {
+			++ptp->undersize_sync_windows;
+		} else {
+			ngood++;
+			last_good = i;
 		}
+	}
 
 	if (ngood == 0) {
 		netif_warn(efx, drv, efx->net_dev,
-			   "PTP no suitable synchronisations %dns\n",
-			   ptp->base_sync_ns);
+			   "PTP no suitable synchronisations\n");
 		return -EAGAIN;
 	}
 
-	/* Average minimum this synchronisation */
-	ptp->last_sync_ns = DIV_ROUND_UP(total, ngood);
-	if (!ptp->base_sync_valid || (ptp->last_sync_ns < ptp->base_sync_ns)) {
-		ptp->base_sync_valid = true;
-		ptp->base_sync_ns = ptp->last_sync_ns;
-	}
+	/* Convert the NIC time into kernel time. No correction is required-
+	 * this time is the output of a firmware process.
+	 */
+	mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
+					  ptp->timeset[last_good].minor, 0);
 
 	/* Calculate delay from actual PPS to last_time */
-	delta.tv_nsec =
-		ptp->timeset[last_good].nanoseconds +
+	delta = ktime_to_timespec(mc_time);
+	delta.tv_nsec +=
 		last_time->ts_real.tv_nsec -
 		(ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
@@ -553,6 +837,11 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
 		loops++;
 	}
 
+	if (loops <= 1)
+		++ptp->fast_syncs;
+	if (!time_before(jiffies, timeout))
+		++ptp->sync_timeouts;
+
 	if (ACCESS_ONCE(*start))
 		efx_ptp_send_times(efx, &last_time);
@@ -561,9 +850,20 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
 			  MC_CMD_PTP_IN_SYNCHRONIZE_LEN,
 			  synch_buf, sizeof(synch_buf),
 			  &response_length);
-	if (rc == 0)
+	if (rc == 0) {
 		rc = efx_ptp_process_times(efx, synch_buf, response_length,
 					   &last_time);
+		if (rc == 0)
+			++ptp->good_syncs;
+		else
+			++ptp->no_time_syncs;
+	}
+
+	/* Increment the bad syncs counter if the synchronize fails, whatever
+	 * the reason.
+	 */
+	if (rc != 0)
+		++ptp->bad_syncs;
 
 	return rc;
 }
@@ -602,9 +902,10 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
 		goto fail;
 
 	memset(&timestamps, 0, sizeof(timestamps));
-	timestamps.hwtstamp = ktime_set(
-		MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_SECONDS),
-		MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_NANOSECONDS));
+	timestamps.hwtstamp = ptp_data->nic_to_kernel_time(
+		MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MAJOR),
+		MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MINOR),
+		ptp_data->ts_corrections.tx);
 
 	skb_tstamp_tx(skb, &timestamps);
@@ -622,6 +923,9 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
 	struct list_head *cursor;
 	struct list_head *next;
 
+	if (ptp->rx_ts_inline)
+		return;
+
 	/* Drop time-expired events */
 	spin_lock_bh(&ptp->evt_lock);
 	if (!list_empty(&ptp->evt_list)) {
@@ -655,6 +959,8 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
 	struct efx_ptp_match *match;
 	enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED;
 
+	WARN_ON_ONCE(ptp->rx_ts_inline);
+
 	spin_lock_bh(&ptp->evt_lock);
 	evts_waiting = !list_empty(&ptp->evt_list);
 	spin_unlock_bh(&ptp->evt_lock);
@@ -696,13 +1002,10 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
 /* Process any queued receive events and corresponding packets
  *
  * q is returned with all the packets that are ready for delivery.
- * true is returned if at least one of those packets requires
- * synchronisation.
*/ -static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q) +static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q) { struct efx_ptp_data *ptp = efx->ptp_data; - bool rc = false; struct sk_buff *skb; while ((skb = skb_dequeue(&ptp->rxq))) { @@ -713,13 +1016,10 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q) __skb_queue_tail(q, skb); } else if (efx_ptp_match_rx(efx, skb) == PTP_PACKET_STATE_MATCHED) { - rc = true; __skb_queue_tail(q, skb); } else if (time_after(jiffies, match->expiry)) { match->state = PTP_PACKET_STATE_TIMED_OUT; - if (net_ratelimit()) - netif_warn(efx, rx_err, efx->net_dev, - "PTP packet - no timestamp seen\n"); + ++ptp->rx_no_timestamp; __skb_queue_tail(q, skb); } else { /* Replace unprocessed entry and stop */ @@ -727,8 +1027,6 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q) break; } } - - return rc; } /* Complete processing of a received packet */ @@ -739,13 +1037,27 @@ static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb) local_bh_enable(); } -static int efx_ptp_start(struct efx_nic *efx) +static void efx_ptp_remove_multicast_filters(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + + if (ptp->rxfilter_installed) { + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + ptp->rxfilter_general); + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + ptp->rxfilter_event); + ptp->rxfilter_installed = false; + } +} + +static int efx_ptp_insert_multicast_filters(struct efx_nic *efx) { struct efx_ptp_data *ptp = efx->ptp_data; struct efx_filter_spec rxfilter; int rc; - ptp->reset_required = false; + if (!ptp->channel || ptp->rxfilter_installed) + return 0; /* Must filter on both event and general ports to ensure * that there is no packet re-ordering. 
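 	 * (Editorial note: for PTP over UDP these are the event messages on
 	 * port 319 and the general messages on port 320, which is why two
 	 * filters, rxfilter_event and rxfilter_general, are inserted below.)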
@@ -778,23 +1090,37 @@ static int efx_ptp_start(struct efx_nic *efx) goto fail; ptp->rxfilter_general = rc; + ptp->rxfilter_installed = true; + return 0; + +fail: + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + ptp->rxfilter_event); + return rc; +} + +static int efx_ptp_start(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + int rc; + + ptp->reset_required = false; + + rc = efx_ptp_insert_multicast_filters(efx); + if (rc) + return rc; + rc = efx_ptp_enable(efx); if (rc != 0) - goto fail2; + goto fail; ptp->evt_frag_idx = 0; ptp->current_adjfreq = 0; - ptp->rxfilter_installed = true; return 0; -fail2: - efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, - ptp->rxfilter_general); fail: - efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, - ptp->rxfilter_event); - + efx_ptp_remove_multicast_filters(efx); return rc; } @@ -810,13 +1136,7 @@ static int efx_ptp_stop(struct efx_nic *efx) rc = efx_ptp_disable(efx); - if (ptp->rxfilter_installed) { - efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, - ptp->rxfilter_general); - efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, - ptp->rxfilter_event); - ptp->rxfilter_installed = false; - } + efx_ptp_remove_multicast_filters(efx); /* Make sure RX packets are really delivered */ efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq); @@ -844,7 +1164,7 @@ static void efx_ptp_pps_worker(struct work_struct *work) { struct efx_ptp_data *ptp = container_of(work, struct efx_ptp_data, pps_work); - struct efx_nic *efx = ptp->channel->efx; + struct efx_nic *efx = ptp->efx; struct ptp_clock_event ptp_evt; if (efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS)) @@ -855,13 +1175,11 @@ static void efx_ptp_pps_worker(struct work_struct *work) ptp_clock_event(ptp->phc_clock, &ptp_evt); } -/* Process any pending transmissions and timestamp any received packets. - */ static void efx_ptp_worker(struct work_struct *work) { struct efx_ptp_data *ptp_data = container_of(work, struct efx_ptp_data, work); - struct efx_nic *efx = ptp_data->channel->efx; + struct efx_nic *efx = ptp_data->efx; struct sk_buff *skb; struct sk_buff_head tempq; @@ -874,42 +1192,50 @@ static void efx_ptp_worker(struct work_struct *work) efx_ptp_drop_time_expired_events(efx); __skb_queue_head_init(&tempq); - if (efx_ptp_process_events(efx, &tempq) || - !skb_queue_empty(&ptp_data->txq)) { + efx_ptp_process_events(efx, &tempq); - while ((skb = skb_dequeue(&ptp_data->txq))) - efx_ptp_xmit_skb(efx, skb); - } + while ((skb = skb_dequeue(&ptp_data->txq))) + efx_ptp_xmit_skb(efx, skb); while ((skb = __skb_dequeue(&tempq))) efx_ptp_process_rx(efx, skb); } -/* Initialise PTP channel and state. - * - * Setting core_index to zero causes the queue to be initialised and doesn't - * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue. - */ -static int efx_ptp_probe_channel(struct efx_channel *channel) +static const struct ptp_clock_info efx_phc_clock_info = { + .owner = THIS_MODULE, + .name = "sfc", + .max_adj = MAX_PPB, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .pps = 1, + .adjfreq = efx_phc_adjfreq, + .adjtime = efx_phc_adjtime, + .gettime = efx_phc_gettime, + .settime = efx_phc_settime, + .enable = efx_phc_enable, +}; + +/* Initialise PTP state. 
*/ +int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel) { - struct efx_nic *efx = channel->efx; struct efx_ptp_data *ptp; int rc = 0; unsigned int pos; - channel->irq_moderation = 0; - channel->rx_queue.core_index = 0; - ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL); efx->ptp_data = ptp; if (!efx->ptp_data) return -ENOMEM; + ptp->efx = efx; + ptp->channel = channel; + ptp->rx_ts_inline = efx_nic_rev(efx) >= EFX_REV_HUNT_A0; + rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL); if (rc != 0) goto fail1; - ptp->channel = channel; skb_queue_head_init(&ptp->rxq); skb_queue_head_init(&ptp->txq); ptp->workwq = create_singlethread_workqueue("sfc_ptp"); @@ -929,33 +1255,32 @@ static int efx_ptp_probe_channel(struct efx_channel *channel) list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list); ptp->evt_overflow = false; - ptp->phc_clock_info.owner = THIS_MODULE; - snprintf(ptp->phc_clock_info.name, - sizeof(ptp->phc_clock_info.name), - "%pm", efx->net_dev->perm_addr); - ptp->phc_clock_info.max_adj = MAX_PPB; - ptp->phc_clock_info.n_alarm = 0; - ptp->phc_clock_info.n_ext_ts = 0; - ptp->phc_clock_info.n_per_out = 0; - ptp->phc_clock_info.pps = 1; - ptp->phc_clock_info.adjfreq = efx_phc_adjfreq; - ptp->phc_clock_info.adjtime = efx_phc_adjtime; - ptp->phc_clock_info.gettime = efx_phc_gettime; - ptp->phc_clock_info.settime = efx_phc_settime; - ptp->phc_clock_info.enable = efx_phc_enable; - - ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info, - &efx->pci_dev->dev); - if (IS_ERR(ptp->phc_clock)) { - rc = PTR_ERR(ptp->phc_clock); + /* Get the NIC PTP attributes and set up time conversions */ + rc = efx_ptp_get_attributes(efx); + if (rc < 0) goto fail3; - } - INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker); - ptp->pps_workwq = create_singlethread_workqueue("sfc_pps"); - if (!ptp->pps_workwq) { - rc = -ENOMEM; - goto fail4; + /* Get the timestamp corrections */ + rc = efx_ptp_get_timestamp_corrections(efx); + if (rc < 0) + goto fail3; + + if (efx->mcdi->fn_flags & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) { + ptp->phc_clock_info = efx_phc_clock_info; + ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info, + &efx->pci_dev->dev); + if (IS_ERR(ptp->phc_clock)) { + rc = PTR_ERR(ptp->phc_clock); + goto fail3; + } + + INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker); + ptp->pps_workwq = create_singlethread_workqueue("sfc_pps"); + if (!ptp->pps_workwq) { + rc = -ENOMEM; + goto fail4; + } } ptp->nic_ts_enabled = false; @@ -976,14 +1301,27 @@ fail1: return rc; } -static void efx_ptp_remove_channel(struct efx_channel *channel) +/* Initialise PTP channel. + * + * Setting core_index to zero causes the queue to be initialised and doesn't + * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue. 
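In the reworked probe above, the PHC device is only registered when the MCDI function flags mark this PCI function as primary; secondary functions later report the primary's clock index through ethtool instead. A tiny illustrative test of that flag is below; the bit position is a placeholder, not the real MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY value.

#include <stdbool.h>
#include <stdint.h>

/* Placeholder bit position standing in for
 * MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY.
 */
#define FN_FLAG_PRIMARY_LBN	0

static bool should_register_phc(uint32_t fn_flags)
{
	/* Only the primary function owns the PHC; the others share it. */
	return (fn_flags & (1u << FN_FLAG_PRIMARY_LBN)) != 0;
}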
+ */ +static int efx_ptp_probe_channel(struct efx_channel *channel) { struct efx_nic *efx = channel->efx; + channel->irq_moderation = 0; + channel->rx_queue.core_index = 0; + + return efx_ptp_probe(efx, channel); +} + +void efx_ptp_remove(struct efx_nic *efx) +{ if (!efx->ptp_data) return; - (void)efx_ptp_disable(channel->efx); + (void)efx_ptp_disable(efx); cancel_work_sync(&efx->ptp_data->work); cancel_work_sync(&efx->ptp_data->pps_work); @@ -991,15 +1329,22 @@ static void efx_ptp_remove_channel(struct efx_channel *channel) skb_queue_purge(&efx->ptp_data->rxq); skb_queue_purge(&efx->ptp_data->txq); - ptp_clock_unregister(efx->ptp_data->phc_clock); + if (efx->ptp_data->phc_clock) { + destroy_workqueue(efx->ptp_data->pps_workwq); + ptp_clock_unregister(efx->ptp_data->phc_clock); + } destroy_workqueue(efx->ptp_data->workwq); - destroy_workqueue(efx->ptp_data->pps_workwq); efx_nic_free_buffer(efx, &efx->ptp_data->start); kfree(efx->ptp_data); } +static void efx_ptp_remove_channel(struct efx_channel *channel) +{ + efx_ptp_remove(channel->efx); +} + static void efx_ptp_get_channel_name(struct efx_channel *channel, char *buf, size_t len) { @@ -1080,14 +1425,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) /* Does this packet require timestamping? */ if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) { - struct skb_shared_hwtstamps *timestamps; - match->state = PTP_PACKET_STATE_UNMATCHED; - /* Clear all timestamps held: filled in later */ - timestamps = skb_hwtstamps(skb); - memset(timestamps, 0, sizeof(*timestamps)); - /* We expect the sequence number to be in the same position in * the packet for PTP V1 and V2 */ @@ -1132,8 +1471,13 @@ int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) return NETDEV_TX_OK; } -static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, - unsigned int new_mode) +int efx_ptp_get_mode(struct efx_nic *efx) +{ + return efx->ptp_data->mode; +} + +int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, + unsigned int new_mode) { if ((enable_wanted != efx->ptp_data->enabled) || (enable_wanted && (efx->ptp_data->mode != new_mode))) { @@ -1177,8 +1521,6 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init) { - bool enable_wanted = false; - unsigned int new_mode; int rc; if (init->flags) @@ -1188,63 +1530,20 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init) (init->tx_type != HWTSTAMP_TX_ON)) return -ERANGE; - new_mode = efx->ptp_data->mode; - /* Determine whether any PTP HW operations are required */ - switch (init->rx_filter) { - case HWTSTAMP_FILTER_NONE: - break; - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; - new_mode = MC_CMD_PTP_MODE_V1; - enable_wanted = true; - break; - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - /* Although these three are accepted only IPV4 packets will be - * timestamped - */ - init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; - new_mode = MC_CMD_PTP_MODE_V2_ENHANCED; - enable_wanted = true; - break; - case HWTSTAMP_FILTER_PTP_V2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: - /* 
Non-IP + IPv6 timestamping not supported */ - return -ERANGE; - break; - default: - return -ERANGE; - } - - if (init->tx_type != HWTSTAMP_TX_OFF) - enable_wanted = true; - - /* Old versions of the firmware do not support the improved - * UUID filtering option (SF bug 33070). If the firmware does - * not accept the enhanced mode, fall back to the standard PTP - * v2 UUID filtering. - */ - rc = efx_ptp_change_mode(efx, enable_wanted, new_mode); - if ((rc != 0) && (new_mode == MC_CMD_PTP_MODE_V2_ENHANCED)) - rc = efx_ptp_change_mode(efx, enable_wanted, MC_CMD_PTP_MODE_V2); - if (rc != 0) + rc = efx->type->ptp_set_ts_config(efx, init); + if (rc) return rc; efx->ptp_data->config = *init; - return 0; } void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info) { struct efx_ptp_data *ptp = efx->ptp_data; + struct efx_nic *primary = efx->primary; + + ASSERT_RTNL(); if (!ptp) return; @@ -1252,18 +1551,14 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info) ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE); - ts_info->phc_index = ptp_clock_index(ptp->phc_clock); + if (primary && primary->ptp_data && primary->ptp_data->phc_clock) + ts_info->phc_index = + ptp_clock_index(primary->ptp_data->phc_clock); ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON; - ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE | - 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT | - 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC | - 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ | - 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT | - 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC | - 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); + ts_info->rx_filters = ptp->efx->type->hwtstamp_filters; } -int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd) +int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr) { struct hwtstamp_config config; int rc; @@ -1283,6 +1578,15 @@ int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd) ? -EFAULT : 0; } +int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr) +{ + if (!efx->ptp_data) + return -EOPNOTSUPP; + + return copy_to_user(ifr->ifr_data, &efx->ptp_data->config, + sizeof(efx->ptp_data->config)) ? 
-EFAULT : 0; +} + static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len) { struct efx_ptp_data *ptp = efx->ptp_data; @@ -1302,6 +1606,9 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp) { struct efx_ptp_event_rx *evt = NULL; + if (WARN_ON_ONCE(ptp->rx_ts_inline)) + return; + if (ptp->evt_frag_idx != 3) { ptp_event_failure(efx, 3); return; @@ -1320,9 +1627,10 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp) MCDI_EVENT_SRC) << 8) | (EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_SRC) << 16)); - evt->hwtimestamp = ktime_set( + evt->hwtimestamp = efx->ptp_data->nic_to_kernel_time( EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA), - EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA)); + EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA), + ptp->ts_corrections.rx); evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS); list_add_tail(&evt->link, &ptp->evt_list); @@ -1397,12 +1705,99 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) } } +void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev) +{ + channel->sync_timestamp_major = MCDI_EVENT_FIELD(*ev, PTP_TIME_MAJOR); + channel->sync_timestamp_minor = + MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_26_19) << 19; + /* if sync events have been disabled then we want to silently ignore + * this event, so throw away result. + */ + (void) cmpxchg(&channel->sync_events_state, SYNC_EVENTS_REQUESTED, + SYNC_EVENTS_VALID); +} + +/* make some assumptions about the time representation rather than abstract it, + * since we currently only support one type of inline timestamping and only on + * EF10. + */ +#define MINOR_TICKS_PER_SECOND 0x8000000 +/* Fuzz factor for sync events to be out of order with RX events */ +#define FUZZ (MINOR_TICKS_PER_SECOND / 10) +#define EXPECTED_SYNC_EVENTS_PER_SECOND 4 + +static inline u32 efx_rx_buf_timestamp_minor(struct efx_nic *efx, const u8 *eh) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_ts_offset)); +#else + const u8 *data = eh + efx->rx_packet_ts_offset; + return (u32)data[0] | + (u32)data[1] << 8 | + (u32)data[2] << 16 | + (u32)data[3] << 24; +#endif +} + +void __efx_rx_skb_attach_timestamp(struct efx_channel *channel, + struct sk_buff *skb) +{ + struct efx_nic *efx = channel->efx; + u32 pkt_timestamp_major, pkt_timestamp_minor; + u32 diff, carry; + struct skb_shared_hwtstamps *timestamps; + + pkt_timestamp_minor = (efx_rx_buf_timestamp_minor(efx, + skb_mac_header(skb)) + + (u32) efx->ptp_data->ts_corrections.rx) & + (MINOR_TICKS_PER_SECOND - 1); + + /* get the difference between the packet and sync timestamps, + * modulo one second + */ + diff = (pkt_timestamp_minor - channel->sync_timestamp_minor) & + (MINOR_TICKS_PER_SECOND - 1); + /* do we roll over a second boundary and need to carry the one? */ + carry = channel->sync_timestamp_minor + diff > MINOR_TICKS_PER_SECOND ? + 1 : 0; + + if (diff <= MINOR_TICKS_PER_SECOND / EXPECTED_SYNC_EVENTS_PER_SECOND + + FUZZ) { + /* packet is ahead of the sync event by a quarter of a second or + * less (allowing for fuzz) + */ + pkt_timestamp_major = channel->sync_timestamp_major + carry; + } else if (diff >= MINOR_TICKS_PER_SECOND - FUZZ) { + /* packet is behind the sync event but within the fuzz factor. + * This means the RX packet and sync event crossed as they were + * placed on the event queue, which can sometimes happen. 
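The packet's minor timestamp is read straight out of the RX prefix, and MINOR_TICKS_PER_SECOND = 0x8000000 means the minor field counts 2^27 ticks per second (about 7.5 ns per tick). Below is a standalone sketch of the byte-wise little-endian load used as the fallback when unaligned accesses are not known to be cheap; it is not the driver function itself.

#include <stdint.h>

/* Portable little-endian 32-bit load, equivalent in effect to the
 * open-coded fallback in efx_rx_buf_timestamp_minor().
 */
static uint32_t load_le32(const uint8_t *p)
{
	return (uint32_t)p[0] |
	       (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 |
	       (uint32_t)p[3] << 24;
}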
+ */ + pkt_timestamp_major = channel->sync_timestamp_major - 1 + carry; + } else { + /* it's outside tolerance in both directions. this might be + * indicative of us missing sync events for some reason, so + * we'll call it an error rather than risk giving a bogus + * timestamp. + */ + netif_vdbg(efx, drv, efx->net_dev, + "packet timestamp %x too far from sync event %x:%x\n", + pkt_timestamp_minor, channel->sync_timestamp_major, + channel->sync_timestamp_minor); + return; + } + + /* attach the timestamps to the skb */ + timestamps = skb_hwtstamps(skb); + timestamps->hwtstamp = + efx_ptp_s27_to_ktime(pkt_timestamp_major, pkt_timestamp_minor); +} + static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) { struct efx_ptp_data *ptp_data = container_of(ptp, struct efx_ptp_data, phc_clock_info); - struct efx_nic *efx = ptp_data->channel->efx; + struct efx_nic *efx = ptp_data->efx; MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN); s64 adjustment_ns; int rc; @@ -1432,18 +1827,20 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) { + u32 nic_major, nic_minor; struct efx_ptp_data *ptp_data = container_of(ptp, struct efx_ptp_data, phc_clock_info); - struct efx_nic *efx = ptp_data->channel->efx; - struct timespec delta_ts = ns_to_timespec(delta); + struct efx_nic *efx = ptp_data->efx; MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN); + efx->ptp_data->ns_to_nic_time(delta, &nic_major, &nic_minor); + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq); - MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec); - MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec); + MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MAJOR, nic_major); + MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MINOR, nic_minor); return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), NULL, 0, NULL); } @@ -1453,10 +1850,11 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts) struct efx_ptp_data *ptp_data = container_of(ptp, struct efx_ptp_data, phc_clock_info); - struct efx_nic *efx = ptp_data->channel->efx; + struct efx_nic *efx = ptp_data->efx; MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN); int rc; + ktime_t kt; MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME); MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); @@ -1466,8 +1864,10 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts) if (rc != 0) return rc; - ts->tv_sec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_SECONDS); - ts->tv_nsec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_NANOSECONDS); + kt = ptp_data->nic_to_kernel_time( + MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MAJOR), + MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MINOR), 0); + *ts = ktime_to_timespec(kt); return 0; } @@ -1519,7 +1919,7 @@ static const struct efx_channel_type efx_ptp_channel_type = { .keep_eventq = false, }; -void efx_ptp_probe(struct efx_nic *efx) +void efx_ptp_defer_probe_with_channel(struct efx_nic *efx) { /* Check whether PTP is implemented on this NIC. The DISABLE * operation will succeed if and only if it is implemented. 
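The branch logic above recovers the packet's seconds (major) value from its 27-bit minor timestamp and the most recent time-sync event. Below is a standalone model of that reconstruction: the constants follow the patch, while the helper name and return convention are illustrative; a false return corresponds to the driver's choice to attach no timestamp rather than a bogus one.

#include <stdbool.h>
#include <stdint.h>

#define MINOR_TICKS_PER_SECOND	0x8000000u		/* 2^27 ticks per second */
#define FUZZ			(MINOR_TICKS_PER_SECOND / 10)
#define SYNCS_PER_SECOND	4u

static bool reconstruct_major(uint32_t sync_major, uint32_t sync_minor,
			      uint32_t pkt_minor, uint32_t *pkt_major)
{
	/* Difference between packet and sync timestamps, modulo one second. */
	uint32_t diff = (pkt_minor - sync_minor) & (MINOR_TICKS_PER_SECOND - 1);
	/* Did adding the difference cross a one-second boundary? */
	uint32_t carry =
		sync_minor + diff > MINOR_TICKS_PER_SECOND ? 1 : 0;

	if (diff <= MINOR_TICKS_PER_SECOND / SYNCS_PER_SECOND + FUZZ) {
		/* Packet is at most a quarter second (plus fuzz) after the
		 * sync event.
		 */
		*pkt_major = sync_major + carry;
		return true;
	}
	if (diff >= MINOR_TICKS_PER_SECOND - FUZZ) {
		/* Packet and sync event crossed as they were queued. */
		*pkt_major = sync_major - 1 + carry;
		return true;
	}
	return false;	/* too far from the sync event: drop the timestamp */
}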
@@ -1533,9 +1933,15 @@ void efx_ptp_start_datapath(struct efx_nic *efx) { if (efx_ptp_restart(efx)) netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n"); + /* re-enable timestamping if it was previously enabled */ + if (efx->type->ptp_set_ts_sync_events) + efx->type->ptp_set_ts_sync_events(efx, true, true); } void efx_ptp_stop_datapath(struct efx_nic *efx) { + /* temporarily disable timestamping */ + if (efx->type->ptp_set_ts_sync_events) + efx->type->ptp_set_ts_sync_events(efx, false, true); efx_ptp_stop(efx); } diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 42488df1f4ec..48588ddf81b0 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -149,7 +149,7 @@ static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue) * 0 on success. If a single page can be used for multiple buffers, * then the page will either be inserted fully, or not at all. */ -static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue) +static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic) { struct efx_nic *efx = rx_queue->efx; struct efx_rx_buffer *rx_buf; @@ -163,7 +163,8 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue) do { page = efx_reuse_page(rx_queue); if (page == NULL) { - page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC, + page = alloc_pages(__GFP_COLD | __GFP_COMP | + (atomic ? GFP_ATOMIC : GFP_KERNEL), efx->rx_buffer_order); if (unlikely(page == NULL)) return -ENOMEM; @@ -321,7 +322,7 @@ static void efx_discard_rx_packet(struct efx_channel *channel, * this means this function must run from the NAPI handler, or be called * when NAPI is disabled. */ -void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) +void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic) { struct efx_nic *efx = rx_queue->efx; unsigned int fill_level, batch_size; @@ -354,7 +355,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) do { - rc = efx_init_rx_buffers(rx_queue); + rc = efx_init_rx_buffers(rx_queue, atomic); if (unlikely(rc)) { /* Ensure that we don't leave the rx queue empty */ if (rx_queue->added_count == rx_queue->removed_count) @@ -439,7 +440,8 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, } if (efx->net_dev->features & NETIF_F_RXHASH) - skb->rxhash = efx_rx_buf_hash(efx, eh); + skb_set_hash(skb, efx_rx_buf_hash(efx, eh), + PKT_HASH_TYPE_L3); skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? 
CHECKSUM_UNNECESSARY : CHECKSUM_NONE); @@ -475,14 +477,18 @@ static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel, struct sk_buff *skb; /* Allocate an SKB to store the headers */ - skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN); + skb = netdev_alloc_skb(efx->net_dev, + efx->rx_ip_align + efx->rx_prefix_size + + hdr_len); if (unlikely(skb == NULL)) return NULL; EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len); - skb_reserve(skb, EFX_PAGE_SKB_ALIGN); - memcpy(__skb_put(skb, hdr_len), eh, hdr_len); + memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size, + efx->rx_prefix_size + hdr_len); + skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size); + __skb_put(skb, hdr_len); /* Append the remaining page(s) onto the frag list */ if (rx_buf->len > hdr_len) { @@ -619,6 +625,8 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh, if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) skb->ip_summed = CHECKSUM_UNNECESSARY; + efx_rx_skb_attach_timestamp(channel, skb); + if (channel->type->receive_skb) if (channel->type->receive_skb(channel, skb)) return; diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c index 144bbff5a4ae..26641817a9c7 100644 --- a/drivers/net/ethernet/sfc/selftest.c +++ b/drivers/net/ethernet/sfc/selftest.c @@ -722,7 +722,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, return rc_reset; } - if ((tests->registers < 0) && !rc_test) + if ((tests->memory < 0 || tests->registers < 0) && !rc_test) rc_test = -EIO; } diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h index a2f4a06ffa4e..009dbe88f3be 100644 --- a/drivers/net/ethernet/sfc/selftest.h +++ b/drivers/net/ethernet/sfc/selftest.h @@ -38,6 +38,7 @@ struct efx_self_tests { int eventq_dma[EFX_MAX_CHANNELS]; int eventq_int[EFX_MAX_CHANNELS]; /* offline tests */ + int memory; int registers; int phy_ext[EFX_MAX_PHY_TESTS]; struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1]; diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index d034bcd124ef..23f3a6f7737a 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -118,6 +118,54 @@ out: /************************************************************************** * + * PTP + * + ************************************************************************** + */ + +static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time) +{ + _efx_writed(efx, cpu_to_le32(host_time), + FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST); +} + +static int siena_ptp_set_ts_config(struct efx_nic *efx, + struct hwtstamp_config *init) +{ + int rc; + + switch (init->rx_filter) { + case HWTSTAMP_FILTER_NONE: + /* if TX timestamping is still requested then leave PTP on */ + return efx_ptp_change_mode(efx, + init->tx_type != HWTSTAMP_TX_OFF, + efx_ptp_get_mode(efx)); + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + return efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V1); + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + rc = efx_ptp_change_mode(efx, true, + MC_CMD_PTP_MODE_V2_ENHANCED); + /* bug 33070 - old versions of the firmware do not support the + * improved UUID filtering option. 
Similarly old versions of the + * application do not expect it to be enabled. If the firmware + * does not accept the enhanced mode, fall back to the standard + * PTP v2 UUID filtering. */ + if (rc != 0) + rc = efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V2); + return rc; + default: + return -ERANGE; + } +} + +/************************************************************************** + * * Device reset * ************************************************************************** @@ -259,7 +307,7 @@ static int siena_probe_nic(struct efx_nic *efx) goto fail5; efx_sriov_probe(efx); - efx_ptp_probe(efx); + efx_ptp_defer_probe_with_channel(efx); return 0; @@ -273,6 +321,31 @@ fail1: return rc; } +static void siena_rx_push_rss_config(struct efx_nic *efx) +{ + efx_oword_t temp; + + /* Set hash key for IPv4 */ + memcpy(&temp, efx->rx_hash_key, sizeof(temp)); + efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); + + /* Enable IPv6 RSS */ + BUILD_BUG_ON(sizeof(efx->rx_hash_key) < + 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 || + FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0); + memcpy(&temp, efx->rx_hash_key, sizeof(temp)); + efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1); + memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp)); + efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2); + EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1, + FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1); + memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp), + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); + efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); + + efx_farch_rx_push_indir_table(efx); +} + /* This call performs hardware-specific global initialisation, such as * defining the descriptor cache sizes and number of RSS channels. * It does not set up any buffers, descriptor rings or event queues. 
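siena_ptp_set_ts_config() above reduces the requested rx_filter to one of the MCDI PTP modes and retries plain V2 when the firmware rejects V2_ENHANCED. The sketch below models only that mapping and fallback; the enum values and the fw_set_mode() helper are stand-ins, not the real HWTSTAMP_FILTER_* or MC_CMD_PTP_MODE_* constants.

enum ptp_mode { PTP_MODE_NONE, PTP_MODE_V1, PTP_MODE_V2, PTP_MODE_V2_ENHANCED };
enum rx_filter { FILTER_NONE, FILTER_V1_L4_EVENT, FILTER_V2_L4_EVENT };

static int fw_set_mode(enum ptp_mode mode)
{
	/* Stand-in for efx_ptp_change_mode(); pretend old firmware
	 * rejects the enhanced mode.
	 */
	return mode == PTP_MODE_V2_ENHANCED ? -1 : 0;
}

static int set_ts_config(enum rx_filter filter)
{
	int rc;

	switch (filter) {
	case FILTER_NONE:
		/* The driver also keeps PTP enabled here if TX timestamping
		 * is still requested; omitted for brevity.
		 */
		return fw_set_mode(PTP_MODE_NONE);
	case FILTER_V1_L4_EVENT:
		return fw_set_mode(PTP_MODE_V1);
	case FILTER_V2_L4_EVENT:
		rc = fw_set_mode(PTP_MODE_V2_ENHANCED);
		if (rc != 0)		/* old firmware: fall back to V2 */
			rc = fw_set_mode(PTP_MODE_V2);
		return rc;
	default:
		return -1;		/* unsupported filter */
	}
}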
@@ -313,23 +386,7 @@ static int siena_init_nic(struct efx_nic *efx) EFX_RX_USR_BUF_SIZE >> 5); efx_writeo(efx, &temp, FR_AZ_RX_CFG); - /* Set hash key for IPv4 */ - memcpy(&temp, efx->rx_hash_key, sizeof(temp)); - efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); - - /* Enable IPv6 RSS */ - BUILD_BUG_ON(sizeof(efx->rx_hash_key) < - 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 || - FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0); - memcpy(&temp, efx->rx_hash_key, sizeof(temp)); - efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1); - memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp)); - efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2); - EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1, - FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1); - memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp), - FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); - efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); + siena_rx_push_rss_config(efx); /* Enable event logging */ rc = efx_mcdi_log_ctrl(efx, true, false, 0); @@ -458,6 +515,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx) return -EAGAIN; /* Update derived statistics */ + efx_nic_fix_nodesc_drop_stat(efx, + &stats[SIENA_STAT_rx_nodesc_drop_cnt]); efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes], stats[SIENA_STAT_tx_bytes] - stats[SIENA_STAT_tx_bad_bytes]); @@ -837,19 +896,6 @@ fail: /************************************************************************** * - * PTP - * - ************************************************************************** - */ - -static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time) -{ - _efx_writed(efx, cpu_to_le32(host_time), - FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST); -} - -/************************************************************************** - * * Revision-dependent attributes used by efx.c and nic.c * ************************************************************************** @@ -878,6 +924,7 @@ const struct efx_nic_type siena_a0_nic_type = { .describe_stats = siena_describe_nic_stats, .update_stats = siena_update_nic_stats, .start_stats = efx_mcdi_mac_start_stats, + .pull_stats = efx_mcdi_mac_pull_stats, .stop_stats = efx_mcdi_mac_stop_stats, .set_id_led = efx_mcdi_set_id_led, .push_irq_moderation = siena_push_irq_moderation, @@ -902,7 +949,7 @@ const struct efx_nic_type siena_a0_nic_type = { .tx_init = efx_farch_tx_init, .tx_remove = efx_farch_tx_remove, .tx_write = efx_farch_tx_write, - .rx_push_indir_table = efx_farch_rx_push_indir_table, + .rx_push_rss_config = siena_rx_push_rss_config, .rx_probe = efx_farch_rx_probe, .rx_init = efx_farch_rx_init, .rx_remove = efx_farch_rx_remove, @@ -939,6 +986,7 @@ const struct efx_nic_type siena_a0_nic_type = { .mtd_sync = efx_mcdi_mtd_sync, #endif .ptp_write_host_time = siena_ptp_write_host_time, + .ptp_set_ts_config = siena_ptp_set_ts_config, .revision = EFX_REV_SIENA_A0, .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, @@ -957,4 +1005,11 @@ const struct efx_nic_type siena_a0_nic_type = { NETIF_F_RXHASH | NETIF_F_NTUPLE), .mcdi_max_ver = 1, .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS, + .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE | + 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT | + 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC | + 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ | + 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT | + 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC | + 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ), }; |
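The new per-NIC-type hwtstamp_filters field, used by efx_ptp_get_ts_info() earlier in the diff, is simply a bitmask of HWTSTAMP_FILTER_* values. A small self-contained example of building such a mask is below; the bit positions are written out to mirror the kernel's net_tstamp.h enum, but treat them as assumptions rather than authoritative, and the EF10 variant is purely hypothetical.

#include <stdint.h>
#include <stdio.h>

enum {
	FILTER_NONE_BIT			= 0,
	FILTER_ALL_BIT			= 1,
	FILTER_PTP_V1_L4_EVENT_BIT	= 3,
	FILTER_PTP_V2_L4_EVENT_BIT	= 6,
};

int main(void)
{
	/* Siena-style mask: NONE plus the V1/V2 L4 event filters
	 * (sync/delay_req bits omitted for brevity).
	 */
	uint32_t siena_filters =
		1u << FILTER_NONE_BIT |
		1u << FILTER_PTP_V1_L4_EVENT_BIT |
		1u << FILTER_PTP_V2_L4_EVENT_BIT;

	/* A NIC with inline RX timestamps could advertise every filter
	 * instead by also setting the ALL bit.
	 */
	uint32_t inline_ts_filters = siena_filters | 1u << FILTER_ALL_BIT;

	printf("siena-style rx_filters mask:  %#x\n", siena_filters);
	printf("inline-ts rx_filters mask:    %#x\n", inline_ts_filters);
	return 0;
}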