author	Johannes Berg <johannes.berg@intel.com>	2025-05-04 13:26:16 +0300
committer	Miri Korenblit <miriam.rachel.korenblit@intel.com>	2025-05-07 06:08:00 +0300
commit	08e77d5edf7063e206096474ead1ed210eae0338 (patch)
tree	e633651c502e323c04f28ff46f3bcfbb1879b8fa /drivers/net/wireless/intel/iwlwifi/pcie
parent	d586137848e32c3754e057bb7a192da2fa59ef82 (diff)
wifi: iwlwifi: rework transport configuration
Instead of having a trans_configure method that copies all the data, have the users set up the configuration in the transport directly. This simplifies the code on both sides. While doing so, also move some values from the trans struct into the conf struct, since they are configuration.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
Link: https://patch.msgid.link/20250504132447.e2a2535ecfd0.I21653103ff02afc5a4d97a41b68021f053985e37@changeid
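
For orientation, here is a minimal sketch (not part of the patch) of what the new flow looks like from the op-mode side: the op-mode fills struct iwl_trans::conf directly and the PCIe code reads it, instead of passing a struct iwl_trans_config to iwl_trans_pcie_configure(). The field names are the ones used in the hunks below; the helper name, the call site, and the concrete values are illustrative assumptions, since the op-mode side is outside this diff (limited to pcie/).

/*
 * Illustrative sketch only -- field names come from the hunks below,
 * values are made up, and real op-modes pick their own.
 */
static void example_setup_trans_conf(struct iwl_trans *trans)
{
	/* values the PCIe code now reads from trans->conf */
	trans->conf.cmd_queue = 0;		/* HCMD queue number */
	trans->conf.cmd_fifo = 7;		/* HCMD FIFO */
	trans->conf.rx_buf_size = IWL_AMSDU_4K;	/* RB size */
	trans->conf.scd_set_active = true;
	trans->conf.fw_reset_handshake = true;
	trans->conf.wide_cmd_header = true;
	trans->conf.queue_alloc_cmd_ver = 3;
	trans->conf.cb_data_offs = 0;		/* offset into skb->cb */
	/* no_reclaim_cmds[]/n_no_reclaim_cmds would be set here as well */
}

With that in place, iwl_trans_pcie_op_mode_enter() (the renamed entry point in trans.c below) only derives rx_page_order and rx_buf_bytes from trans->conf.rx_buf_size instead of copying the whole configuration.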
Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/pcie')
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c	5
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c	4
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/pcie/internal.h	30
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/pcie/rx.c	13
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c	4
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/pcie/trans.c	43
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c	22
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/pcie/tx.c	62
8 files changed, 65 insertions, 118 deletions
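
One detail worth calling out before the hunks: the per-transport page_offs/dev_cmd_offs pair is replaced by a single trans->conf.cb_data_offs, with the iwl_device_tx_cmd pointer stored one pointer-width after the page pointer in skb->cb. The sketch below (not part of the patch; the helper names are hypothetical) just restates the layout used in the tx.c and tx-gen2.c hunks:

/* skb->cb layout implied by the hunks below (sketch only) */
static inline struct page **example_cb_page_ptr(struct iwl_trans *trans,
						struct sk_buff *skb)
{
	return (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
}

static inline struct iwl_device_tx_cmd **
example_cb_dev_cmd_ptr(struct iwl_trans *trans, struct sk_buff *skb)
{
	return (void *)((u8 *)skb->cb + trans->conf.cb_data_offs +
			sizeof(void *));
}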
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index b3a04af7faad..195f3ea9d381 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -112,7 +112,7 @@ int iwl_pcie_ctxt_info_gen3_alloc(struct iwl_trans *trans,
int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
trans->cfg->min_txq_size);
- switch (trans_pcie->rx_buf_size) {
+ switch (trans->conf.rx_buf_size) {
case IWL_AMSDU_DEF:
return -EINVAL;
case IWL_AMSDU_2K:
@@ -192,7 +192,6 @@ int iwl_pcie_ctxt_info_gen3_alloc(struct iwl_trans *trans,
if (ret)
goto err_free_prph_scratch;
-
/* Allocate prph information
* currently we don't assign to the prph info anything, but it would get
* assigned later
@@ -249,7 +248,7 @@ int iwl_pcie_ctxt_info_gen3_alloc(struct iwl_trans *trans,
ctxt_info_gen3->cr_tail_idx_arr_base_addr =
cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4);
ctxt_info_gen3->mtr_base_addr =
- cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr);
+ cpu_to_le64(trans_pcie->txqs.txq[trans->conf.cmd_queue]->dma_addr);
ctxt_info_gen3->mcr_base_addr =
cpu_to_le64(trans_pcie->rxq->used_bd_dma);
ctxt_info_gen3->mtr_size =
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 4fd3855e6a34..cc3e3d91b27f 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -184,7 +184,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
/* size is in DWs */
ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);
- switch (trans_pcie->rx_buf_size) {
+ switch (trans->conf.rx_buf_size) {
case IWL_AMSDU_2K:
rb_size = IWL_CTXT_INFO_RB_SIZE_2K;
break;
@@ -218,7 +218,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
/* initialize TX command queue */
ctxt_info->hcmd_cfg.cmd_queue_addr =
- cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr);
+ cpu_to_le64(trans_pcie->txqs.txq[trans->conf.cmd_queue]->dma_addr);
ctxt_info->hcmd_cfg.cmd_queue_size =
TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index f532a5d6576e..d580e2ad7520 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -289,20 +289,14 @@ enum iwl_pcie_imr_status {
/**
* struct iwl_pcie_txqs - TX queues data
*
- * @page_offs: offset from skb->cb to mac header page pointer
- * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
* @queue_used: bit mask of used queues
* @queue_stopped: bit mask of stopped queues
* @txq: array of TXQ data structures representing the TXQs
* @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
- * @queue_alloc_cmd_ver: queue allocation command version
* @bc_pool: bytecount DMA allocations pool
* @bc_tbl_size: bytecount table size
* @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
* (and similar usage)
- * @cmd: command queue data
- * @cmd.fifo: FIFO number
- * @cmd.q_id: queue ID
* @tfd: TFD data
* @tfd.max_tbs: max number of buffers per TFD
* @tfd.size: TFD size
@@ -314,24 +308,15 @@ struct iwl_pcie_txqs {
struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
struct dma_pool *bc_pool;
size_t bc_tbl_size;
- u8 page_offs;
- u8 dev_cmd_offs;
struct iwl_tso_hdr_page __percpu *tso_hdr_page;
struct {
- u8 fifo;
- u8 q_id;
- } cmd;
-
- struct {
u8 max_tbs;
u16 size;
u8 addr_size;
} tfd;
struct iwl_dma_ptr scd_bc_tbls;
-
- u8 queue_alloc_cmd_ver;
};
/**
@@ -361,9 +346,6 @@ struct iwl_pcie_txqs {
* @hw_base: pci hardware address support
* @ucode_write_complete: indicates that the ucode has been copied.
* @ucode_write_waitq: wait queue for uCode load
- * @cmd_queue - command queue number
- * @rx_buf_size: Rx buffer size
- * @scd_set_active: should the transport configure the SCD for HCMD queue
* @rx_page_order: page order for receive buffer size
* @rx_buf_bytes: RX buffer (RB) size in bytes
* @reg_lock: protect hw register access
@@ -404,13 +386,9 @@ struct iwl_pcie_txqs {
* @pcie_dbg_dumped_once: indicates PCIe regs were dumped already
* @opmode_down: indicates opmode went away
* @num_rx_bufs: number of RX buffers to allocate/use
- * @no_reclaim_cmds: special commands not using reclaim flow
- * (firmware workaround)
- * @n_no_reclaim_cmds: number of special commands not using reclaim flow
* @affinity_mask: IRQ affinity mask for each RX queue
* @debug_rfkill: RF-kill debugging state, -1 for unset, 0/1 for radio
* enable/disable
- * @fw_reset_handshake: indicates FW reset handshake is needed
* @fw_reset_state: state of FW reset handshake
* @fw_reset_waitq: waitqueue for FW reset handshake
* @is_down: indicates the NIC is down
@@ -474,12 +452,8 @@ struct iwl_trans_pcie {
wait_queue_head_t ucode_write_waitq;
wait_queue_head_t sx_waitq;
- u8 n_no_reclaim_cmds;
- u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
u16 num_rx_bufs;
- enum iwl_amsdu_size rx_buf_size;
- bool scd_set_active;
bool pcie_dbg_dumped_once;
u32 rx_page_order;
u32 rx_buf_bytes;
@@ -514,7 +488,6 @@ struct iwl_trans_pcie {
void *base_rb_stts;
dma_addr_t base_rb_stts_dma;
- bool fw_reset_handshake;
enum iwl_pcie_fw_reset_state fw_reset_state;
wait_queue_head_t fw_reset_waitq;
enum iwl_pcie_imr_status imr_status;
@@ -1100,8 +1073,7 @@ static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
void iwl_pcie_rx_allocator_work(struct work_struct *data);
/* common trans ops for all generations transports */
-void iwl_trans_pcie_configure(struct iwl_trans *trans,
- const struct iwl_trans_config *trans_cfg);
+void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans);
int iwl_trans_pcie_start_hw(struct iwl_trans *trans);
void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans);
void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index cb8c8973cb18..712b8ded9f4b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -363,8 +363,8 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
u32 *offset, gfp_t priority)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
+ unsigned int rbsize = trans_pcie->rx_buf_bytes;
struct page *page;
gfp_t gfp_mask = priority;
@@ -835,11 +835,10 @@ err:
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 rb_size;
const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
- switch (trans_pcie->rx_buf_size) {
+ switch (trans->conf.rx_buf_size) {
case IWL_AMSDU_4K:
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
break;
@@ -907,7 +906,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
u32 rb_size, enabled = 0;
int i;
- switch (trans_pcie->rx_buf_size) {
+ switch (trans->conf.rx_buf_size) {
case IWL_AMSDU_2K:
rb_size = RFH_RXF_DMA_RB_SIZE_2K;
break;
@@ -1302,7 +1301,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
int i)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
bool page_stolen = false;
int max_len = trans_pcie->rx_buf_bytes;
u32 offset = 0;
@@ -1369,8 +1368,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
if (reclaim && !pkt->hdr.group_id) {
int i;
- for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
- if (trans_pcie->no_reclaim_cmds[i] ==
+ for (i = 0; i < trans->conf.n_no_reclaim_cmds; i++) {
+ if (trans->conf.no_reclaim_cmds[i] ==
pkt->hdr.cmd) {
reclaim = false;
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index f9a9fece9b6d..a47a5922ff24 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -157,7 +157,7 @@ static void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
return;
if (trans->state >= IWL_TRANS_FW_STARTED &&
- trans_pcie->fw_reset_handshake) {
+ trans->conf.fw_reset_handshake) {
/*
* Reset handshake can dump firmware on timeout, but that
* should assume that the firmware is already dead.
@@ -270,7 +270,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
return -ENOMEM;
/* Allocate or reset and init all Tx and Command queues */
- if (iwl_txq_gen2_init(trans, trans_pcie->txqs.cmd.q_id, queue_size))
+ if (iwl_txq_gen2_init(trans, trans->conf.cmd_queue, queue_size))
return -ENOMEM;
/* enable shadow regs in HW */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index fcbf2825042e..4d281c702eec 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1966,44 +1966,17 @@ void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
-void iwl_trans_pcie_configure(struct iwl_trans *trans,
- const struct iwl_trans_config *trans_cfg)
+void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
/* free all first - we might be reconfigured for a different size */
iwl_pcie_free_rbs_pool(trans);
- trans_pcie->txqs.cmd.q_id = trans_cfg->cmd_queue;
- trans_pcie->txqs.cmd.fifo = trans_cfg->cmd_fifo;
- trans_pcie->txqs.page_offs = trans_cfg->cb_data_offs;
- trans_pcie->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
- trans_pcie->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;
-
- if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
- trans_pcie->n_no_reclaim_cmds = 0;
- else
- trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
- if (trans_pcie->n_no_reclaim_cmds)
- memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
- trans_pcie->n_no_reclaim_cmds * sizeof(u8));
-
- trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
trans_pcie->rx_page_order =
- iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
+ iwl_trans_get_rb_size_order(trans->conf.rx_buf_size);
trans_pcie->rx_buf_bytes =
- iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
- trans_pcie->supported_dma_mask = DMA_BIT_MASK(12);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
-
- trans_pcie->scd_set_active = trans_cfg->scd_set_active;
-
- trans->command_groups = trans_cfg->command_groups;
- trans->command_groups_size = trans_cfg->command_groups_size;
-
-
- trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake;
+ iwl_trans_get_rb_size(trans->conf.rx_buf_size);
}
void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
@@ -2702,7 +2675,7 @@ int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
cnt < trans->trans_cfg->base_params->num_of_queues;
cnt++) {
- if (cnt == trans_pcie->txqs.cmd.q_id)
+ if (cnt == trans->conf.cmd_queue)
continue;
if (!test_bit(cnt, trans_pcie->txqs.queue_used))
continue;
@@ -2893,7 +2866,7 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
else
seq_puts(seq, "(unallocated)");
- if (state->pos == trans_pcie->txqs.cmd.q_id)
+ if (state->pos == trans->conf.cmd_queue)
seq_puts(seq, " (HCMD)");
seq_puts(seq, "\n");
@@ -3632,7 +3605,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_fw_error_dump_data *data;
- struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+ struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
u32 len, num_rbs = 0, monitor_len = 0;
@@ -3847,6 +3820,10 @@ iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfd);
}
+ trans_pcie->supported_dma_mask = (u32)DMA_BIT_MASK(12);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ trans_pcie->supported_dma_mask = (u32)DMA_BIT_MASK(11);
+
info->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie);
trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 665c359e8479..b567ff3f9052 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -18,13 +18,12 @@
static struct page *get_workaround_page(struct iwl_trans *trans,
struct sk_buff *skb)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tso_page_info *info;
struct page **page_ptr;
struct page *ret;
dma_addr_t phys;
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
+ page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
ret = alloc_page(GFP_ATOMIC);
if (!ret)
@@ -751,7 +750,8 @@ int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
- trans_pcie->txqs.dev_cmd_offs);
+ trans->conf.cb_data_offs +
+ sizeof(void *));
*dev_cmd_ptr = dev_cmd;
__skb_queue_tail(&txq->overflow_q, skb);
@@ -827,7 +827,7 @@ static void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
txq_id, txq->read_ptr);
- if (txq_id != trans_pcie->txqs.cmd.q_id) {
+ if (txq_id != trans->conf.cmd_queue) {
int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
struct iwl_cmd_meta *cmd_meta = &txq->entries[idx].meta;
struct sk_buff *skb = txq->entries[idx].skb;
@@ -901,7 +901,7 @@ static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
iwl_txq_gen2_unmap(trans, txq_id);
/* De-alloc array of command/tx buffers */
- if (txq_id == trans_pcie->txqs.cmd.q_id)
+ if (txq_id == trans->conf.cmd_queue)
for (i = 0; i < txq->n_window; i++) {
kfree_sensitive(txq->entries[i].cmd);
kfree_sensitive(txq->entries[i].free_buf);
@@ -1059,7 +1059,7 @@ int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
if (IS_ERR(txq))
return PTR_ERR(txq);
- if (trans_pcie->txqs.queue_alloc_cmd_ver == 0) {
+ if (trans->conf.queue_alloc_cmd_ver == 0) {
memset(&cmd.old, 0, sizeof(cmd.old));
cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
@@ -1076,7 +1076,7 @@ int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
hcmd.id = SCD_QUEUE_CFG;
hcmd.len[0] = sizeof(cmd.old);
hcmd.data[0] = &cmd.old;
- } else if (trans_pcie->txqs.queue_alloc_cmd_ver == 3) {
+ } else if (trans->conf.queue_alloc_cmd_ver == 3) {
memset(&cmd.new, 0, sizeof(cmd.new));
cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
@@ -1171,7 +1171,7 @@ int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
}
ret = iwl_txq_init(trans, queue, queue_size,
- (txq_id == trans_pcie->txqs.cmd.q_id));
+ (txq_id == trans->conf.cmd_queue));
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
@@ -1201,7 +1201,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
void *dup_buf = NULL;
@@ -1318,7 +1318,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |
INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
@@ -1366,7 +1366,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
"Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
iwl_get_cmd_string(trans, cmd->id), group_id,
out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id);
+ cmd_size, txq->write_ptr, idx, trans->conf.cmd_queue);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index fcddf32196e1..632d147f7921 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -78,7 +78,6 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
struct iwl_txq *txq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
int txq_id = txq->id;
@@ -91,7 +90,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
* 3. there is a chance that the NIC is asleep
*/
if (!trans->trans_cfg->base_params->shadow_reg_enable &&
- txq_id != trans_pcie->txqs.cmd.q_id &&
+ txq_id != trans->conf.cmd_queue &&
test_bit(STATUS_TPOWER_PMI, &trans->status)) {
/*
* wake up nic if it's powered down ...
@@ -226,11 +225,10 @@ static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans,
void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *cmd_meta)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct page **page_ptr;
struct page *next;
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
+ page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
next = *page_ptr;
*page_ptr = NULL;
@@ -396,7 +394,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
txq_id, txq->read_ptr);
- if (txq_id != trans_pcie->txqs.cmd.q_id) {
+ if (txq_id != trans->conf.cmd_queue) {
struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
struct iwl_cmd_meta *cmd_meta =
&txq->entries[txq->read_ptr].meta;
@@ -410,7 +408,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
if (txq->read_ptr == txq->write_ptr &&
- txq_id == trans_pcie->txqs.cmd.q_id)
+ txq_id == trans->conf.cmd_queue)
iwl_pcie_clear_cmd_in_flight(trans);
}
@@ -448,7 +446,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
iwl_pcie_txq_unmap(trans, txq_id);
/* De-alloc array of command/tx buffers */
- if (txq_id == trans_pcie->txqs.cmd.q_id)
+ if (txq_id == trans->conf.cmd_queue)
for (i = 0; i < txq->n_window; i++) {
kfree_sensitive(txq->entries[i].cmd);
kfree_sensitive(txq->entries[i].free_buf);
@@ -509,8 +507,8 @@ void iwl_pcie_tx_start(struct iwl_trans *trans)
if (trans->trans_cfg->base_params->scd_chain_ext_wa)
iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
- iwl_trans_ac_txq_enable(trans, trans_pcie->txqs.cmd.q_id,
- trans_pcie->txqs.cmd.fifo,
+ iwl_trans_ac_txq_enable(trans, trans->conf.cmd_queue,
+ trans->conf.cmd_fifo,
IWL_DEF_WD_TIMEOUT);
/* Activate all Tx DMA/FIFO channels */
@@ -847,7 +845,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
- bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id);
+ bool cmd_queue = (txq_id == trans->conf.cmd_queue);
if (cmd_queue)
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
@@ -964,7 +962,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
- bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id);
+ bool cmd_queue = (txq_id == trans->conf.cmd_queue);
if (cmd_queue)
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
@@ -1163,15 +1161,15 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
fifo = cfg->fifo;
/* Disable the scheduler prior configuring the cmd queue */
- if (txq_id == trans_pcie->txqs.cmd.q_id &&
- trans_pcie->scd_set_active)
+ if (txq_id == trans->conf.cmd_queue &&
+ trans->conf.scd_set_active)
iwl_scd_enable_set_active(trans, 0);
/* Stop this Tx queue before configuring it */
iwl_scd_txq_set_inactive(trans, txq_id);
/* Set this queue as a chain-building queue unless it is CMD */
- if (txq_id != trans_pcie->txqs.cmd.q_id)
+ if (txq_id != trans->conf.cmd_queue)
iwl_scd_txq_set_chain(trans, txq_id);
if (cfg->aggregate) {
@@ -1241,8 +1239,8 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
SCD_QUEUE_STTS_REG_MSK);
/* enable the scheduler for this queue (only) */
- if (txq_id == trans_pcie->txqs.cmd.q_id &&
- trans_pcie->scd_set_active)
+ if (txq_id == trans->conf.cmd_queue &&
+ trans->conf.scd_set_active)
iwl_scd_enable_set_active(trans, BIT(txq_id));
IWL_DEBUG_TX_QUEUES(trans,
@@ -1313,7 +1311,7 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
struct iwl_txq *txq = trans_pcie->txqs.txq[i];
- if (i == trans_pcie->txqs.cmd.q_id)
+ if (i == trans->conf.cmd_queue)
continue;
/* we skip the command queue (obviously) so it's OK to nest */
@@ -1346,7 +1344,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
void *dup_buf = NULL;
@@ -1361,7 +1359,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
unsigned long flags;
- if (WARN(!trans->wide_cmd_header &&
+ if (WARN(!trans->conf.wide_cmd_header &&
group_id > IWL_ALWAYS_LONG_GROUP,
"unsupported wide command %#x\n", cmd->id))
return -EINVAL;
@@ -1475,7 +1473,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |
INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
@@ -1483,7 +1481,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
} else {
out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
out_cmd->hdr.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |
INDEX_TO_SEQ(txq->write_ptr));
out_cmd->hdr.group_id = 0;
@@ -1534,7 +1532,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
iwl_get_cmd_string(trans, cmd->id),
group_id, out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id);
+ cmd_size, txq->write_ptr, idx, trans->conf.cmd_queue);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
@@ -1633,14 +1631,14 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
/* If a Tx command is being handled and it isn't in the actual
* command queue then there a command routing bug has been introduced
* in the queue management code. */
- if (WARN(txq_id != trans_pcie->txqs.cmd.q_id,
+ if (WARN(txq_id != trans->conf.cmd_queue,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, trans_pcie->txqs.cmd.q_id, sequence, txq->read_ptr,
+ txq_id, trans->conf.cmd_queue, sequence, txq->read_ptr,
txq->write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
@@ -1753,7 +1751,7 @@ static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans,
dma_addr_t phys;
void *ret;
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
+ page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
if (WARN_ON(*page_ptr))
return NULL;
@@ -2154,7 +2152,8 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
- trans_pcie->txqs.dev_cmd_offs);
+ trans->conf.cb_data_offs +
+ sizeof(void *));
*dev_cmd_ptr = dev_cmd;
__skb_queue_tail(&txq->overflow_q, skb);
@@ -2322,7 +2321,7 @@ static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
- if (txq_id != trans_pcie->txqs.cmd.q_id)
+ if (txq_id != trans->conf.cmd_queue)
sta_id = tx_cmd->sta_id;
bc_ent = cpu_to_le16(1 | (sta_id << 12));
@@ -2344,7 +2343,7 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
int txq_read_ptr, txq_write_ptr;
/* This function is not meant to release cmd queue*/
- if (WARN_ON(txq_id == trans_pcie->txqs.cmd.q_id))
+ if (WARN_ON(txq_id == trans->conf.cmd_queue))
return;
if (WARN_ON(!txq))
@@ -2457,7 +2456,8 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct iwl_device_tx_cmd *dev_cmd_ptr;
dev_cmd_ptr = *(void **)((u8 *)skb->cb +
- trans_pcie->txqs.dev_cmd_offs);
+ trans->conf.cb_data_offs +
+ sizeof(void *));
/*
* Note that we can very well be overflowing again.
@@ -2553,7 +2553,7 @@ static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
- struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
int cmd_idx;
int ret;