Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/pcie/tx.c')
 drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 257
 1 file changed, 131 insertions(+), 126 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 334ebd4c12fa..7abd7c7daa89 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2003-2014, 2018-2021, 2023-2024 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2021, 2023-2025 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -78,7 +78,6 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
 static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
 				    struct iwl_txq *txq)
 {
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 reg = 0;
 	int txq_id = txq->id;
 
@@ -90,8 +89,8 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
 	 * 2. NIC is woken up for CMD regardless of shadow outside this function
 	 * 3. there is a chance that the NIC is asleep
 	 */
-	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
-	    txq_id != trans_pcie->txqs.cmd.q_id &&
+	if (!trans->mac_cfg->base->shadow_reg_enable &&
+	    txq_id != trans->conf.cmd_queue &&
 	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
 		/*
 		 * wake up nic if it's powered down ...
@@ -125,7 +124,7 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int i;
 
-	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
+	for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {
 		struct iwl_txq *txq = trans_pcie->txqs.txq[i];
 
 		if (!test_bit(i, trans_pcie->txqs.queue_used))
@@ -193,7 +192,7 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
+	if (!trans->mac_cfg->base->apmg_wake_up_wa)
 		return;
 
 	spin_lock(&trans_pcie->reg_lock);
@@ -226,11 +225,10 @@ static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans,
 void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
 			     struct iwl_cmd_meta *cmd_meta)
 {
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct page **page_ptr;
 	struct page *next;
 
-	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
+	page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
 	next = *page_ptr;
 	*page_ptr = NULL;
 
@@ -280,10 +278,12 @@ iwl_txq_gen1_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
 static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
 					 struct iwl_tfd *tfd)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
 	tfd->num_tbs = 0;
 
-	iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans->invalid_tx_cmd.dma,
-				 trans->invalid_tx_cmd.size);
+	iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans_pcie->invalid_tx_cmd.dma,
+				 trans_pcie->invalid_tx_cmd.size);
 }
 
 static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
@@ -355,7 +355,7 @@ static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
 	/* We have only q->n_window txq->entries, but we use
 	 * TFD_QUEUE_SIZE_MAX tfds
 	 */
-	if (trans->trans_cfg->gen2)
+	if (trans->mac_cfg->gen2)
 		iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
 				       iwl_txq_get_tfd(trans, txq, read_ptr));
 	else
@@ -394,7 +394,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
 		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
 				   txq_id, txq->read_ptr);
 
-		if (txq_id != trans_pcie->txqs.cmd.q_id) {
+		if (txq_id != trans->conf.cmd_queue) {
 			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
 			struct iwl_cmd_meta *cmd_meta =
 				&txq->entries[txq->read_ptr].meta;
@@ -408,7 +408,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
 		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
 
 		if (txq->read_ptr == txq->write_ptr &&
-		    txq_id == trans_pcie->txqs.cmd.q_id)
+		    txq_id == trans->conf.cmd_queue)
 			iwl_pcie_clear_cmd_in_flight(trans);
 	}
 
@@ -446,7 +446,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
 	iwl_pcie_txq_unmap(trans, txq_id);
 
 	/* De-alloc array of command/tx buffers */
-	if (txq_id == trans_pcie->txqs.cmd.q_id)
+	if (txq_id == trans->conf.cmd_queue)
 		for (i = 0; i < txq->n_window; i++) {
 			kfree_sensitive(txq->entries[i].cmd);
 			kfree_sensitive(txq->entries[i].free_buf);
@@ -456,7 +456,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
 	if (txq->tfds) {
 		dma_free_coherent(dev,
 				  trans_pcie->txqs.tfd.size *
-				  trans->trans_cfg->base_params->max_tfd_queue_size,
+				  trans->mac_cfg->base->max_tfd_queue_size,
 				  txq->tfds, txq->dma_addr);
 		txq->dma_addr = 0;
 		txq->tfds = NULL;
@@ -469,16 +469,16 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
 	kfree(txq->entries);
 	txq->entries = NULL;
 
-	del_timer_sync(&txq->stuck_timer);
+	timer_delete_sync(&txq->stuck_timer);
 
 	/* 0-fill queue descriptor structure */
 	memset(txq, 0, sizeof(*txq));
 }
 
-void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
+void iwl_pcie_tx_start(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	int nq = trans->trans_cfg->base_params->num_of_queues;
+	int nq = trans->mac_cfg->base->num_of_queues;
 	int chan;
 	u32 reg_val;
 	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
@@ -493,13 +493,10 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
 	trans_pcie->scd_base_addr =
 		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
 
-	WARN_ON(scd_base_addr != 0 &&
-		scd_base_addr != trans_pcie->scd_base_addr);
-
 	/* reset context data, TX status and translation data */
-	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
-				   SCD_CONTEXT_MEM_LOWER_BOUND,
-			    NULL, clear_dwords);
+	iwl_trans_pcie_write_mem(trans, trans_pcie->scd_base_addr +
+					SCD_CONTEXT_MEM_LOWER_BOUND,
+				 NULL, clear_dwords);
 
 	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
 		       trans_pcie->txqs.scd_bc_tbls.dma >> 10);
@@ -507,12 +504,12 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
 	/* The chain extension of the SCD doesn't work well. This feature is
 	 * enabled by default by the HW, so we need to disable it manually.
 	 */
-	if (trans->trans_cfg->base_params->scd_chain_ext_wa)
+	if (trans->mac_cfg->base->scd_chain_ext_wa)
 		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
 
-	iwl_trans_ac_txq_enable(trans, trans_pcie->txqs.cmd.q_id,
-				trans_pcie->txqs.cmd.fifo,
-				trans_pcie->txqs.cmd.wdg_timeout);
+	iwl_trans_ac_txq_enable(trans, trans->conf.cmd_queue,
+				trans->conf.cmd_fifo,
+				IWL_DEF_WD_TIMEOUT);
 
 	/* Activate all Tx DMA/FIFO channels */
 	iwl_scd_activate_fifos(trans);
@@ -529,7 +526,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
 			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
 
 	/* Enable L1-Active */
-	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
+	if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000)
 		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
 				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
 }
@@ -543,13 +540,13 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
 	 * we should never get here in gen2 trans mode return early to avoid
 	 * having invalid accesses
 	 */
-	if (WARN_ON_ONCE(trans->trans_cfg->gen2))
+	if (WARN_ON_ONCE(trans->mac_cfg->gen2))
 		return;
 
-	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
+	for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
 	     txq_id++) {
 		struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
-		if (trans->trans_cfg->gen2)
+		if (trans->mac_cfg->gen2)
 			iwl_write_direct64(trans,
 					   FH_MEM_CBBC_QUEUE(trans, txq_id),
 					   txq->dma_addr);
@@ -571,7 +568,7 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
 	 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
 	 * contain garbage.
 	 */
-	iwl_pcie_tx_start(trans, 0);
+	iwl_pcie_tx_start(trans);
 }
 
 static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
@@ -633,7 +630,7 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
 		return 0;
 
 	/* Unmap DMA from host system and free skb's */
-	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
+	for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
 	     txq_id++)
 		iwl_pcie_txq_unmap(trans, txq_id);
 
@@ -656,7 +653,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
 	/* Tx queues */
 	if (trans_pcie->txq_memory) {
 		for (txq_id = 0;
-		     txq_id < trans->trans_cfg->base_params->num_of_queues;
+		     txq_id < trans->mac_cfg->base->num_of_queues;
 		     txq_id++) {
 			iwl_pcie_txq_free(trans, txq_id);
 			trans_pcie->txqs.txq[txq_id] = NULL;
@@ -678,7 +675,7 @@ void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
 	bool active;
 	u8 fifo;
 
-	if (trans->trans_cfg->gen2) {
+	if (trans->mac_cfg->gen2) {
 		IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
 			txq->read_ptr, txq->write_ptr);
 		/* TODO: access new SCD registers and dump them */
@@ -695,9 +692,9 @@ void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
 		jiffies_to_msecs(txq->wd_timeout),
 		txq->read_ptr, txq->write_ptr,
 		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
-			(trans->trans_cfg->base_params->max_tfd_queue_size - 1),
+			(trans->mac_cfg->base->max_tfd_queue_size - 1),
 		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
-			(trans->trans_cfg->base_params->max_tfd_queue_size - 1),
+			(trans->mac_cfg->base->max_tfd_queue_size - 1),
 		iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
 }
 
@@ -723,8 +720,8 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
 		       int slots_num, bool cmd_queue)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	size_t num_entries = trans->trans_cfg->gen2 ?
-		slots_num : trans->trans_cfg->base_params->max_tfd_queue_size;
+	size_t num_entries = trans->mac_cfg->gen2 ?
+		slots_num : trans->mac_cfg->base->max_tfd_queue_size;
 	size_t tfd_sz;
 	size_t tb0_buf_sz;
 	int i;
@@ -779,7 +776,7 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
 	for (i = 0; i < num_entries; i++) {
 		void *tfd = iwl_txq_get_tfd(trans, txq, i);
 
-		if (trans->trans_cfg->gen2)
+		if (trans->mac_cfg->gen2)
 			iwl_txq_set_tfd_invalid_gen2(trans, tfd);
 		else
 			iwl_txq_set_tfd_invalid_gen1(trans, tfd);
@@ -799,6 +796,8 @@ error:
 	return -ENOMEM;
 }
 
+#define BC_TABLE_SIZE (sizeof(struct iwl_bc_tbl_entry) * TFD_QUEUE_BC_SIZE)
+
 /*
  * iwl_pcie_tx_alloc - allocate TX context
  * Allocate all Tx DMA structures and initialize them
@@ -808,12 +807,12 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
 	int ret;
 	int txq_id, slots_num;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;
+	u16 bc_tbls_size = trans->mac_cfg->base->num_of_queues;
 
-	if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
+	if (WARN_ON(trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
 		return -EINVAL;
 
-	bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);
+	bc_tbls_size *= BC_TABLE_SIZE;
 
 	/*It is not allowed to alloc twice, so warn when this happens.
 	 * We cannot rely on the previous allocation, so free and fail */
@@ -837,7 +836,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
 	}
 
 	trans_pcie->txq_memory =
-		kcalloc(trans->trans_cfg->base_params->num_of_queues,
+		kcalloc(trans->mac_cfg->base->num_of_queues,
 			sizeof(struct iwl_txq), GFP_KERNEL);
 	if (!trans_pcie->txq_memory) {
 		IWL_ERR(trans, "Not enough memory for txq\n");
@@ -846,16 +845,16 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
 	}
 
 	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
-	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
+	for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
 	     txq_id++) {
-		bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id);
+		bool cmd_queue = (txq_id == trans->conf.cmd_queue);
 
 		if (cmd_queue)
 			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
-					  trans->cfg->min_txq_size);
+					  trans->mac_cfg->base->min_txq_size);
 		else
 			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
-					  trans->cfg->min_ba_txq_size);
+					  trans->mac_cfg->base->min_ba_txq_size);
 		trans_pcie->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
 		ret = iwl_pcie_txq_alloc(trans, trans_pcie->txqs.txq[txq_id],
 					 slots_num, cmd_queue);
@@ -905,7 +904,7 @@ int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
 		 int slots_num, bool cmd_queue)
 {
 	u32 tfd_queue_max_size =
-		trans->trans_cfg->base_params->max_tfd_queue_size;
+		trans->mac_cfg->base->max_tfd_queue_size;
 	int ret;
 
 	txq->need_update = false;
@@ -963,16 +962,16 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
 	spin_unlock_bh(&trans_pcie->irq_lock);
 
 	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
-	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
+	for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
 	     txq_id++) {
-		bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id);
+		bool cmd_queue = (txq_id == trans->conf.cmd_queue);
 
 		if (cmd_queue)
 			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
-					  trans->cfg->min_txq_size);
+					  trans->mac_cfg->base->min_txq_size);
 		else
 			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
-					  trans->cfg->min_ba_txq_size);
+					  trans->mac_cfg->base->min_ba_txq_size);
 		ret = iwl_txq_init(trans, trans_pcie->txqs.txq[txq_id],
 				   slots_num, cmd_queue);
 		if (ret) {
@@ -991,7 +990,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
 	}
 
 	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
-	if (trans->trans_cfg->base_params->num_of_queues > 20)
+	if (trans->mac_cfg->base->num_of_queues > 20)
 		iwl_set_bits_prph(trans, SCD_GP_CTRL,
 				  SCD_GP_CTRL_ENABLE_31_QUEUES);
 
@@ -1012,7 +1011,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
 	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
 		return -ENODEV;
 
-	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
+	if (!trans->mac_cfg->base->apmg_wake_up_wa)
 		return 0;
 
 	/*
@@ -1021,7 +1020,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
 	 * returned. This needs to be done only on NICs that have
 	 * apmg_wake_up_wa set (see above.)
 	 */
-	if (!_iwl_trans_pcie_grab_nic_access(trans))
+	if (!_iwl_trans_pcie_grab_nic_access(trans, false))
 		return -EIO;
 
 	/*
@@ -1054,7 +1053,7 @@ static void iwl_txq_progress(struct iwl_txq *txq)
 	 * since we're making progress on this queue
 	 */
 	if (txq->read_ptr == txq->write_ptr)
-		del_timer(&txq->stuck_timer);
+		timer_delete(&txq->stuck_timer);
 	else
 		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
 }
@@ -1090,12 +1089,12 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
 	idx = iwl_txq_get_cmd_index(txq, idx);
 	r = iwl_txq_get_cmd_index(txq, txq->read_ptr);
 
-	if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
+	if (idx >= trans->mac_cfg->base->max_tfd_queue_size ||
 	    (!iwl_txq_used(txq, idx, txq->read_ptr, txq->write_ptr))) {
 		WARN_ONCE(test_bit(txq_id, trans_pcie->txqs.queue_used),
 			  "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
 			  __func__, txq_id, idx,
			  trans->mac_cfg->base->max_tfd_queue_size,
 			  txq->write_ptr, txq->read_ptr);
 		return;
 	}
@@ -1164,15 +1163,15 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
 		fifo = cfg->fifo;
 
 		/* Disable the scheduler prior configuring the cmd queue */
-		if (txq_id == trans_pcie->txqs.cmd.q_id &&
-		    trans_pcie->scd_set_active)
+		if (txq_id == trans->conf.cmd_queue &&
+		    trans->conf.scd_set_active)
 			iwl_scd_enable_set_active(trans, 0);
 
 		/* Stop this Tx queue before configuring it */
 		iwl_scd_txq_set_inactive(trans, txq_id);
 
 		/* Set this queue as a chain-building queue unless it is CMD */
-		if (txq_id != trans_pcie->txqs.cmd.q_id)
+		if (txq_id != trans->conf.cmd_queue)
 			iwl_scd_txq_set_chain(trans, txq_id);
 
 		if (cfg->aggregate) {
@@ -1206,7 +1205,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
 		 * this sad hardware issue.
 		 * This bug has been fixed on devices 9000 and up.
 		 */
-		scd_bug = !trans->trans_cfg->mq_rx_supported &&
+		scd_bug = !trans->mac_cfg->mq_rx_supported &&
 			!((ssn - txq->write_ptr) & 0x3f) &&
 			(ssn != txq->write_ptr);
 		if (scd_bug)
@@ -1242,8 +1241,8 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
 			       SCD_QUEUE_STTS_REG_MSK);
 
 		/* enable the scheduler for this queue (only) */
-		if (txq_id == trans_pcie->txqs.cmd.q_id &&
-		    trans_pcie->scd_set_active)
+		if (txq_id == trans->conf.cmd_queue &&
+		    trans->conf.scd_set_active)
 			iwl_scd_enable_set_active(trans, BIT(txq_id));
 
 		IWL_DEBUG_TX_QUEUES(trans,
@@ -1293,8 +1292,9 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
 
 	if (configure_scd) {
 		iwl_scd_txq_set_inactive(trans, txq_id);
-		iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val,
-				    ARRAY_SIZE(zero_val));
+		iwl_trans_pcie_write_mem(trans, stts_addr,
+					 (const void *)zero_val,
+					 ARRAY_SIZE(zero_val));
 	}
 
 	iwl_pcie_txq_unmap(trans, txq_id);
@@ -1310,10 +1310,10 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int i;
 
-	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
+	for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {
 		struct iwl_txq *txq = trans_pcie->txqs.txq[i];
 
-		if (i == trans_pcie->txqs.cmd.q_id)
+		if (i == trans->conf.cmd_queue)
 			continue;
 
 		/* we skip the command queue (obviously) so it's OK to nest */
@@ -1346,7 +1346,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 			  struct iwl_host_cmd *cmd)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+	struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
 	struct iwl_device_cmd *out_cmd;
 	struct iwl_cmd_meta *out_meta;
 	void *dup_buf = NULL;
@@ -1361,7 +1361,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
 	unsigned long flags;
 
-	if (WARN(!trans->wide_cmd_header &&
+	if (WARN(!trans->conf.wide_cmd_header &&
 		 group_id > IWL_ALWAYS_LONG_GROUP,
 		 "unsupported wide command %#x\n", cmd->id))
 		return -EINVAL;
@@ -1475,7 +1475,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		       sizeof(struct iwl_cmd_header_wide));
 		out_cmd->hdr_wide.reserved = 0;
 		out_cmd->hdr_wide.sequence =
-			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
+			cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |
 				    INDEX_TO_SEQ(txq->write_ptr));
 
 		cmd_pos = sizeof(struct iwl_cmd_header_wide);
@@ -1483,7 +1483,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	} else {
 		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
 		out_cmd->hdr.sequence =
-			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
+			cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |
 				    INDEX_TO_SEQ(txq->write_ptr));
 		out_cmd->hdr.group_id = 0;
 
@@ -1534,7 +1534,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		     iwl_get_cmd_string(trans, cmd->id),
 		     group_id, out_cmd->hdr.cmd,
 		     le16_to_cpu(out_cmd->hdr.sequence),
-		     cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id);
+		     cmd_size, txq->write_ptr, idx, trans->conf.cmd_queue);
 
 	/* start the TFD with the minimum copy bytes */
 	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
@@ -1633,14 +1633,14 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 	struct iwl_device_cmd *cmd;
 	struct iwl_cmd_meta *meta;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+	struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
 
 	/* If a Tx command is being handled and it isn't in the actual
 	 * command queue then there a command routing bug has been introduced
 	 * in the queue management code. */
-	if (WARN(txq_id != trans_pcie->txqs.cmd.q_id,
+	if (WARN(txq_id != trans->conf.cmd_queue,
 		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
-		 txq_id, trans_pcie->txqs.cmd.q_id, sequence, txq->read_ptr,
+		 txq_id, trans->conf.cmd_queue, sequence, txq->read_ptr,
 		 txq->write_ptr)) {
 		iwl_print_hex_error(trans, pkt, 32);
 		return;
@@ -1654,7 +1654,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 	group_id = cmd->hdr.group_id;
 	cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);
 
-	if (trans->trans_cfg->gen2)
+	if (trans->mac_cfg->gen2)
 		iwl_txq_gen2_tfd_unmap(trans, meta,
 				       iwl_txq_get_tfd(trans, txq, index));
 	else
@@ -1683,7 +1683,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
 		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
 			       iwl_get_cmd_string(trans, cmd_id));
-		wake_up(&trans->wait_command_queue);
+		wake_up(&trans_pcie->wait_command_queue);
 	}
 
 	meta->flags = 0;
@@ -1753,7 +1753,7 @@ static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans,
 	dma_addr_t phys;
 	void *ret;
 
-	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
+	page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
 
 	if (WARN_ON(*page_ptr))
 		return NULL;
@@ -1855,6 +1855,7 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
  * @cmd_meta: command meta to store the scatter list information for unmapping
  * @hdr: output argument for TSO headers
  * @hdr_room: requested length for TSO headers
+ * @offset: offset into the data from which mapping should start
  *
  * Allocate space for a scatter gather list and TSO headers and map the SKB
  * using the scatter gather list. The SKB is unmapped again when the page is
@@ -1864,9 +1865,12 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
  */
 struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
 				   struct iwl_cmd_meta *cmd_meta,
-				   u8 **hdr, unsigned int hdr_room)
+				   u8 **hdr, unsigned int hdr_room,
+				   unsigned int offset)
 {
 	struct sg_table *sgt;
+	unsigned int n_segments = skb_shinfo(skb)->nr_frags + 1;
+	int orig_nents;
 
 	if (WARN_ON_ONCE(skb_has_frag_list(skb)))
 		return NULL;
@@ -1874,8 +1878,7 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
 	*hdr = iwl_pcie_get_page_hdr(trans,
 				     hdr_room + __alignof__(struct sg_table) +
 				     sizeof(struct sg_table) +
-				     (skb_shinfo(skb)->nr_frags + 1) *
-				     sizeof(struct scatterlist),
+				     n_segments * sizeof(struct scatterlist),
 				     skb);
 	if (!*hdr)
 		return NULL;
@@ -1883,14 +1886,15 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
 	sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table));
 	sgt->sgl = (void *)(sgt + 1);
 
-	sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1);
+	sg_init_table(sgt->sgl, n_segments);
 
 	/* Only map the data, not the header (it is copied to the TSO page) */
-	sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, skb_headlen(skb),
-				       skb->data_len);
-	if (WARN_ON_ONCE(sgt->orig_nents <= 0))
+	orig_nents = skb_to_sgvec(skb, sgt->sgl, offset, skb->len - offset);
+	if (WARN_ON_ONCE(orig_nents <= 0))
 		return NULL;
 
+	sgt->orig_nents = orig_nents;
+
 	/* And map the entire SKB */
 	if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0)
 		return NULL;
@@ -1908,7 +1912,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 				   u16 tb1_len)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+	struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;
 	struct ieee80211_hdr *hdr = (void *)skb->data;
 	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
 	unsigned int mss = skb_shinfo(skb)->gso_size;
@@ -1939,7 +1943,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
 
 	/* Our device supports 9 segments at most, it will fit in 1 page */
-	sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room);
+	sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room,
+				snap_ip_tcp_hdrlen + hdr_len + iv_len);
 	if (!sgt)
 		return -ENOMEM;
 
@@ -2062,14 +2067,14 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
 					     int num_tbs)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
+	struct iwl_bc_tbl_entry *scd_bc_tbl;
 	int write_ptr = txq->write_ptr;
 	int txq_id = txq->id;
 	u8 sec_ctl = 0;
 	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
 	__le16 bc_ent;
 	struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
-	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+	struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;
 	u8 sta_id = tx_cmd->sta_id;
 
 	scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
@@ -2087,7 +2092,8 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
 		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
 		break;
 	}
-	if (trans_pcie->txqs.bc_table_dword)
+
+	if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
 		len = DIV_ROUND_UP(len, 4);
 
 	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
@@ -2095,10 +2101,10 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
 
 	bc_ent = cpu_to_le16(len | (sta_id << 12));
 
-	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+	scd_bc_tbl[txq_id * BC_TABLE_SIZE + write_ptr].tfd_offset = bc_ent;
 
 	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
-		scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
+		scd_bc_tbl[txq_id * BC_TABLE_SIZE + TFD_QUEUE_SIZE_MAX + write_ptr].tfd_offset =
 			bc_ent;
 }
 
@@ -2107,7 +2113,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct ieee80211_hdr *hdr;
-	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
+	struct iwl_tx_cmd_v6 *tx_cmd = (struct iwl_tx_cmd_v6 *)dev_cmd->payload;
 	struct iwl_cmd_meta *out_meta;
 	struct iwl_txq *txq;
 	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
@@ -2148,7 +2154,8 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 			struct iwl_device_tx_cmd **dev_cmd_ptr;
 
 			dev_cmd_ptr = (void *)((u8 *)skb->cb +
-					       trans_pcie->txqs.dev_cmd_offs);
+					       trans->conf.cb_data_offs +
+					       sizeof(void *));
 
 			*dev_cmd_ptr = dev_cmd;
 			__skb_queue_tail(&txq->overflow_q, skb);
@@ -2179,7 +2186,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
 	tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
 	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
-		       offsetof(struct iwl_tx_cmd, scratch);
+		       offsetof(struct iwl_tx_cmd_v6, scratch);
 	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
 	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
 
@@ -2194,7 +2201,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	 * (This calculation modifies the TX command, so do it before the
 	 * setup of the first TB)
 	 */
-	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
+	len = sizeof(struct iwl_tx_cmd_v6) + sizeof(struct iwl_cmd_header) +
 	      hdr_len - IWL_FIRST_TB_SIZE;
 	/* do not align A-MSDU to dword as the subframe header aligns it */
 	amsdu = ieee80211_is_data_qos(fc) &&
@@ -2217,9 +2224,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 			       IWL_FIRST_TB_SIZE, true);
 
 	/* there must be data left over for TB1 or this code must be changed */
-	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);
+	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_v6) < IWL_FIRST_TB_SIZE);
 	BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
-		     offsetofend(struct iwl_tx_cmd, scratch) >
+		     offsetofend(struct iwl_tx_cmd_v6, scratch) >
 		     IWL_FIRST_TB_SIZE);
 
 	/* map the data for TB1 */
@@ -2307,24 +2314,24 @@ static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
 					    int read_ptr)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
+	struct iwl_bc_tbl_entry *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
 	int txq_id = txq->id;
 	u8 sta_id = 0;
 	__le16 bc_ent;
 	struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
-	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+	struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;
 
 	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
 
-	if (txq_id != trans_pcie->txqs.cmd.q_id)
+	if (txq_id != trans->conf.cmd_queue)
 		sta_id = tx_cmd->sta_id;
 
 	bc_ent = cpu_to_le16(1 | (sta_id << 12));
 
-	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+	scd_bc_tbl[txq_id * BC_TABLE_SIZE + read_ptr].tfd_offset = bc_ent;
 
 	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
-		scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
+		scd_bc_tbl[txq_id * BC_TABLE_SIZE + TFD_QUEUE_SIZE_MAX + read_ptr].tfd_offset =
 			bc_ent;
 }
 
@@ -2338,7 +2345,7 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 	int txq_read_ptr, txq_write_ptr;
 
 	/* This function is not meant to release cmd queue*/
-	if (WARN_ON(txq_id == trans_pcie->txqs.cmd.q_id))
+	if (WARN_ON(txq_id == trans->conf.cmd_queue))
 		return;
 
 	if (WARN_ON(!txq))
@@ -2380,7 +2387,7 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 		IWL_ERR(trans,
 			"%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
 			__func__, txq_id, last_to_free,
-			trans->trans_cfg->base_params->max_tfd_queue_size,
+			trans->mac_cfg->base->max_tfd_queue_size,
 			txq_write_ptr, txq_read_ptr);
 
 		iwl_op_mode_time_point(trans->op_mode,
@@ -2409,7 +2416,7 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
 		txq->entries[read_ptr].skb = NULL;
 
-		if (!trans->trans_cfg->gen2)
+		if (!trans->mac_cfg->gen2)
 			iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq,
 							txq_read_ptr);
 
@@ -2451,7 +2458,8 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 			struct iwl_device_tx_cmd *dev_cmd_ptr;
 
 			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
-						 trans_pcie->txqs.dev_cmd_offs);
+						 trans->conf.cb_data_offs +
+						 sizeof(void *));
 
 			/*
 			 * Note that we can very well be overflowing again.
@@ -2524,7 +2532,7 @@ void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans,
 			/* remember how long until the timer fires */
 			txq->frozen_expiry_remainder =
 				txq->stuck_timer.expires - now;
-			del_timer(&txq->stuck_timer);
+			timer_delete(&txq->stuck_timer);
 			goto next_queue;
 		}
 
@@ -2543,11 +2551,11 @@ next_queue:
 #define HOST_COMPLETE_TIMEOUT	(2 * HZ)
 
 static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
-					 struct iwl_host_cmd *cmd)
+					 struct iwl_host_cmd *cmd,
+					 const char *cmd_str)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
-	struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+	struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
 	int cmd_idx;
 	int ret;
 
@@ -2560,7 +2568,7 @@ static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
 
 	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
 
-	if (trans->trans_cfg->gen2)
+	if (trans->mac_cfg->gen2)
 		cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
 	else
 		cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
@@ -2573,7 +2581,7 @@ static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
 		return ret;
 	}
 
-	ret = wait_event_timeout(trans->wait_command_queue,
+	ret = wait_event_timeout(trans_pcie->wait_command_queue,
 				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
 					   &trans->status),
 				 HOST_COMPLETE_TIMEOUT);
@@ -2589,7 +2597,7 @@ static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
 			       cmd_str);
 		ret = -ETIMEDOUT;
 
-		iwl_trans_sync_nmi(trans);
+		iwl_trans_pcie_sync_nmi(trans);
 		goto cancel;
 	}
 
@@ -2640,6 +2648,8 @@ cancel:
 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
 			     struct iwl_host_cmd *cmd)
 {
+	const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
+
 	/* Make sure the NIC is still alive in the bus */
 	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
 		return -ENODEV;
@@ -2651,20 +2661,16 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
 		return -ERFKILL;
 	}
 
-	if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
-		     !(cmd->flags & CMD_SEND_IN_D3))) {
-		IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
-		return -EHOSTDOWN;
-	}
-
 	if (cmd->flags & CMD_ASYNC) {
 		int ret;
 
+		IWL_DEBUG_INFO(trans, "Sending async command %s\n", cmd_str);
+
 		/* An asynchronous command can not expect an SKB to be set. */
 		if (WARN_ON(cmd->flags & CMD_WANT_SKB))
 			return -EINVAL;
 
-		if (trans->trans_cfg->gen2)
+		if (trans->mac_cfg->gen2)
 			ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
 		else
 			ret = iwl_pcie_enqueue_hcmd(trans, cmd);
@@ -2678,6 +2684,5 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
 		return 0;
 	}
 
-	return iwl_trans_pcie_send_hcmd_sync(trans, cmd);
+	return iwl_trans_pcie_send_hcmd_sync(trans, cmd, cmd_str);
 }
-IWL_EXPORT_SYMBOL(iwl_trans_pcie_send_hcmd);