Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
author     John W. Linville <linville@tuxdriver.com>
           Wed, 6 Mar 2013 15:21:17 +0000 (10:21 -0500)
committer  John W. Linville <linville@tuxdriver.com>
           Wed, 6 Mar 2013 15:21:17 +0000 (10:21 -0500)
21 files changed:
drivers/net/wireless/iwlwifi/dvm/sta.c
drivers/net/wireless/iwlwifi/iwl-devtrace.h
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-modparams.h
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/tx.c
net/mac80211/cfg.c
net/mac80211/iface.c
net/mac80211/mlme.c
net/mac80211/tx.c
net/wireless/core.c
net/wireless/nl80211.c

diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index 94ef33838bc69b615f8f47140922bf0d13eb2453..b775769f8322c437558712af955332296e95db89 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -151,7 +151,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
                       sta_id, sta->sta.addr, flags & CMD_ASYNC ?  "a" : "");
 
        if (!(flags & CMD_ASYNC)) {
-               cmd.flags |= CMD_WANT_SKB | CMD_WANT_HCMD;
+               cmd.flags |= CMD_WANT_SKB;
                might_sleep();
        }
 
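With CMD_WANT_HCMD gone, a synchronous command that needs its response back only sets CMD_WANT_SKB. A minimal sketch of that calling pattern, per the CMD_WANT_SKB contract documented in iwl-trans.h (the command id and payload names are illustrative, not taken from this patch):

    struct iwl_host_cmd cmd = {
            .id = REPLY_ADD_STA,              /* illustrative command id */
            .flags = CMD_SYNC | CMD_WANT_SKB, /* CMD_SYNC is 0; spelled out for clarity */
            .data = { sta_cmd, },
            .len = { sizeof(*sta_cmd), },
    };
    int ret = iwl_trans_send_cmd(priv->trans, &cmd); /* may sleep */

    if (!ret)
            iwl_free_resp(&cmd); /* caller owns and must free the response */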
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 10f01793d7a605cf4dc7e4d6312ac4af8f4caa64..81aa91fab5aafcc1dc7fa597c85878d01ab83685 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -363,7 +363,7 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
                __entry->flags = cmd->flags;
                memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
 
-               for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+               for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                        if (!cmd->len[i])
                                continue;
                        memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 6f228bb2b84431e61cd8292d1563f81faa91efc6..fbfd2d1371176e247a22e5fca00ee5809088c306 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1102,7 +1102,6 @@ void iwl_drv_stop(struct iwl_drv *drv)
 
 /* shared module parameters */
 struct iwl_mod_params iwlwifi_mod_params = {
-       .amsdu_size_8K = 1,
        .restart_fw = 1,
        .plcp_check = true,
        .bt_coex_active = true,
@@ -1207,7 +1206,7 @@ MODULE_PARM_DESC(11n_disable,
        "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
 module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
                   int, S_IRUGO);
-MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
 module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO);
 MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
 
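With the default flipped to 0, 8K A-MSDU receive buffers become opt-in; loading the driver with "modprobe iwlwifi amsdu_size_8K=1" restores the previous behaviour.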
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index e5e3a79eae2fbd5e8f544db2cc9605522770dddb..2c2a729092f5e894013c39f1eccb0a739a5e56d4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -91,7 +91,7 @@ enum iwl_power_level {
  * @sw_crypto: using hardware encryption, default = 0
  * @disable_11n: disable 11n capabilities, default = 0,
  *     use IWL_DISABLE_HT_* constants
- * @amsdu_size_8K: enable 8K amsdu size, default = 1
+ * @amsdu_size_8K: enable 8K amsdu size, default = 0
  * @restart_fw: restart firmware, default = 1
  * @plcp_check: enable plcp health check, default = true
  * @wd_disable: enable stuck queue check, default = 0
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 8c7bec6b9a0be92d29136a47fc20f920a5ce7a6d..0cac2b7af78b99eb1e8c8951a9638c12033e382c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -186,19 +186,13 @@ struct iwl_rx_packet {
  * @CMD_ASYNC: Return right away and don't want for the response
  * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
  *     response. The caller needs to call iwl_free_resp when done.
- * @CMD_WANT_HCMD: The caller needs to get the HCMD that was sent in the
- *     response handler. Chunks flagged by %IWL_HCMD_DFL_NOCOPY won't be
- *     copied. The pointer passed to the response handler is in the transport
- *     ownership and don't need to be freed by the op_mode. This also means
- *     that the pointer is invalidated after the op_mode's handler returns.
  * @CMD_ON_DEMAND: This command is sent by the test mode pipe.
  */
 enum CMD_MODE {
        CMD_SYNC                = 0,
        CMD_ASYNC               = BIT(0),
        CMD_WANT_SKB            = BIT(1),
-       CMD_WANT_HCMD           = BIT(2),
-       CMD_ON_DEMAND           = BIT(3),
+       CMD_ON_DEMAND           = BIT(2),
 };
 
 #define DEF_CMD_PAYLOAD_SIZE 320
@@ -217,7 +211,11 @@ struct iwl_device_cmd {
 
 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
 
-#define IWL_MAX_CMD_TFDS       2
+/*
+ * number of transfer buffers (fragments) per transmit frame descriptor;
+ * this is just the driver's idea, the hardware supports 20
+ */
+#define IWL_MAX_CMD_TBS_PER_TFD        2
 
 /**
  * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
@@ -254,15 +252,15 @@ enum iwl_hcmd_dataflag {
  * @id: id of the host command
  */
 struct iwl_host_cmd {
-       const void *data[IWL_MAX_CMD_TFDS];
+       const void *data[IWL_MAX_CMD_TBS_PER_TFD];
        struct iwl_rx_packet *resp_pkt;
        unsigned long _rx_page_addr;
        u32 _rx_page_order;
        int handler_status;
 
        u32 flags;
-       u16 len[IWL_MAX_CMD_TFDS];
-       u8 dataflags[IWL_MAX_CMD_TFDS];
+       u16 len[IWL_MAX_CMD_TBS_PER_TFD];
+       u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
        u8 id;
 };
 
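The renamed constant still permits two transfer buffers per command TFD. A hypothetical command split across both, with the large chunk mapped in place rather than copied via IWL_HCMD_DFL_NOCOPY (the flag documented in this file), might look like:

    struct iwl_host_cmd cmd = {
            .id = SOME_WIDE_CMD,                      /* placeholder id */
            .data = { &fixed_part, big_payload },
            .len = { sizeof(fixed_part), payload_len },
            .dataflags = { 0, IWL_HCMD_DFL_NOCOPY },  /* second TB is not copied */
    };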
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 23eebda848b05ae64f9ae8dfb6ce5647dded4b19..2adb61f103f4cd310ae654fdd83ff466a843dda5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -762,18 +762,20 @@ struct iwl_phy_context_cmd {
 #define IWL_RX_INFO_PHY_CNT 8
 #define IWL_RX_INFO_AGC_IDX 1
 #define IWL_RX_INFO_RSSI_AB_IDX 2
-#define IWL_RX_INFO_RSSI_C_IDX 3
-#define IWL_OFDM_AGC_DB_MSK 0xfe00
-#define IWL_OFDM_AGC_DB_POS 9
+#define IWL_OFDM_AGC_A_MSK 0x0000007f
+#define IWL_OFDM_AGC_A_POS 0
+#define IWL_OFDM_AGC_B_MSK 0x00003f80
+#define IWL_OFDM_AGC_B_POS 7
+#define IWL_OFDM_AGC_CODE_MSK 0x3fe00000
+#define IWL_OFDM_AGC_CODE_POS 20
 #define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff
-#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00
 #define IWL_OFDM_RSSI_A_POS 0
+#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00
+#define IWL_OFDM_RSSI_ALLBAND_A_POS 8
 #define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000
-#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
 #define IWL_OFDM_RSSI_B_POS 16
-#define IWL_OFDM_RSSI_INBAND_C_MSK 0x00ff
-#define IWL_OFDM_RSSI_ALLBAND_C_MSK 0xff00
-#define IWL_OFDM_RSSI_C_POS 0
+#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
+#define IWL_OFDM_RSSI_ALLBAND_B_POS 24
 
 /**
  * struct iwl_rx_phy_info - phy info
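As a worked example of the new per-chain layout (the raw value below is invented): chain B's AGC occupies bits 7..13 of the AGC word, so

    u32 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
    u32 agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
    /* e.g. val = 0x00001f80  ->  agc_b = 0x3f */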
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index d3d959db03a9727e7033a871fc4717654dc3ec10..500f818dba0412df8e5a76d018ddaec761529820 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
 #define UCODE_VALID_OK cpu_to_le32(0x1)
 
 /* Default calibration values for WkP - set to INIT image w/o running */
-static const u8 wkp_calib_values_bb_filter[] = { 0xbf, 0x00, 0x5f, 0x00, 0x2f,
-                                                0x00, 0x18, 0x00 };
-static const u8 wkp_calib_values_rx_dc[] = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
-                                            0x7f, 0x7f, 0x7f };
-static const u8 wkp_calib_values_tx_lo[] = { 0x00, 0x00, 0x00, 0x00 };
-static const u8 wkp_calib_values_tx_iq[] = { 0xff, 0x00, 0xff, 0x00, 0x00,
-                                            0x00 };
-static const u8 wkp_calib_values_rx_iq[] = { 0xff, 0x00, 0x00, 0x00 };
 static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 };
 static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 };
-static const u8 wkp_calib_values_xtal[] = { 0xd2, 0xd2 };
 
 struct iwl_calib_default_data {
        u16 size;
@@ -99,12 +90,7 @@ struct iwl_calib_default_data {
 #define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf}
 
 static const struct iwl_calib_default_data wkp_calib_default_data[12] = {
-       [5] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_dc),
-       [6] = CALIB_SIZE_N_DATA(wkp_calib_values_bb_filter),
-       [7] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_lo),
-       [8] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq),
        [9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew),
-       [10] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq),
        [11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew),
 };
 
@@ -241,20 +227,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 
        return 0;
 }
-#define IWL_HW_REV_ID_RAINBOW  0x2
-#define IWL_PROJ_TYPE_LHP      0x5
-
-static u32 iwl_mvm_build_phy_cfg(struct iwl_mvm *mvm)
-{
-       struct iwl_nvm_data *data = mvm->nvm_data;
-       /* Temp calls to static definitions, will be changed to CSR calls */
-       u8 hw_rev_id = IWL_HW_REV_ID_RAINBOW;
-       u8 project_type = IWL_PROJ_TYPE_LHP;
-
-       return data->radio_cfg_dash | (data->radio_cfg_step << 2) |
-               (hw_rev_id << 4) | ((project_type & 0x7f) << 6) |
-               (data->valid_tx_ant << 16) | (data->valid_rx_ant << 20);
-}
 
 static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
 {
@@ -262,7 +234,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
        enum iwl_ucode_type ucode_type = mvm->cur_ucode;
 
        /* Set parameters */
-       phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_build_phy_cfg(mvm));
+       phy_cfg_cmd.phy_cfg = cpu_to_le32(mvm->fw->phy_config);
        phy_cfg_cmd.calib_control.event_trigger =
                mvm->fw->default_calib[ucode_type].event_trigger;
        phy_cfg_cmd.calib_control.flow_trigger =
@@ -275,103 +247,6 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
                                    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
 }
 
-/* Starting with the new PHY DB implementation - New calibs are enabled */
-/* Value - 0x405e7 */
-#define IWL_CALIB_DEFAULT_FLOW_INIT    (IWL_CALIB_CFG_XTAL_IDX         |\
-                                        IWL_CALIB_CFG_TEMPERATURE_IDX  |\
-                                        IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
-                                        IWL_CALIB_CFG_DC_IDX           |\
-                                        IWL_CALIB_CFG_BB_FILTER_IDX    |\
-                                        IWL_CALIB_CFG_LO_LEAKAGE_IDX   |\
-                                        IWL_CALIB_CFG_TX_IQ_IDX        |\
-                                        IWL_CALIB_CFG_RX_IQ_IDX        |\
-                                        IWL_CALIB_CFG_AGC_IDX)
-
-#define IWL_CALIB_DEFAULT_EVENT_INIT   0x0
-
-/* Value 0x41567 */
-#define IWL_CALIB_DEFAULT_FLOW_RUN     (IWL_CALIB_CFG_XTAL_IDX         |\
-                                        IWL_CALIB_CFG_TEMPERATURE_IDX  |\
-                                        IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
-                                        IWL_CALIB_CFG_BB_FILTER_IDX    |\
-                                        IWL_CALIB_CFG_DC_IDX           |\
-                                        IWL_CALIB_CFG_TX_IQ_IDX        |\
-                                        IWL_CALIB_CFG_RX_IQ_IDX        |\
-                                        IWL_CALIB_CFG_SENSITIVITY_IDX  |\
-                                        IWL_CALIB_CFG_AGC_IDX)
-
-#define IWL_CALIB_DEFAULT_EVENT_RUN    (IWL_CALIB_CFG_XTAL_IDX         |\
-                                        IWL_CALIB_CFG_TEMPERATURE_IDX  |\
-                                        IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
-                                        IWL_CALIB_CFG_TX_PWR_IDX       |\
-                                        IWL_CALIB_CFG_DC_IDX           |\
-                                        IWL_CALIB_CFG_TX_IQ_IDX        |\
-                                        IWL_CALIB_CFG_SENSITIVITY_IDX)
-
-/*
- * Sets the calibrations trigger values that will be sent to the FW for runtime
- * and init calibrations.
- * The ones given in the FW TLV are not correct.
- */
-static void iwl_set_default_calib_trigger(struct iwl_mvm *mvm)
-{
-       struct iwl_tlv_calib_ctrl default_calib;
-
-       /*
-        * WkP FW TLV calib bits are wrong, overwrite them.
-        * This defines the dynamic calibrations which are implemented in the
-        * uCode both for init(flow) calculation and event driven calibs.
-        */
-
-       /* Init Image */
-       default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_INIT);
-       default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_INIT);
-
-       if (default_calib.event_trigger !=
-           mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger)
-               IWL_ERR(mvm,
-                       "Updating the event calib for INIT image: 0x%x -> 0x%x\n",
-                       mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger,
-                       default_calib.event_trigger);
-       if (default_calib.flow_trigger !=
-           mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger)
-               IWL_ERR(mvm,
-                       "Updating the flow calib for INIT image: 0x%x -> 0x%x\n",
-                       mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger,
-                       default_calib.flow_trigger);
-
-       memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_INIT],
-              &default_calib, sizeof(struct iwl_tlv_calib_ctrl));
-       IWL_ERR(mvm,
-               "Setting uCode init calibrations event 0x%x, trigger 0x%x\n",
-               default_calib.event_trigger,
-               default_calib.flow_trigger);
-
-       /* Run time image */
-       default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_RUN);
-       default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_RUN);
-
-       if (default_calib.event_trigger !=
-           mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger)
-               IWL_ERR(mvm,
-                       "Updating the event calib for RT image: 0x%x -> 0x%x\n",
-                       mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger,
-                       default_calib.event_trigger);
-       if (default_calib.flow_trigger !=
-           mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger)
-               IWL_ERR(mvm,
-                       "Updating the flow calib for RT image: 0x%x -> 0x%x\n",
-                       mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger,
-                       default_calib.flow_trigger);
-
-       memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_REGULAR],
-              &default_calib, sizeof(struct iwl_tlv_calib_ctrl));
-       IWL_ERR(mvm,
-               "Setting uCode runtime calibs event 0x%x, trigger 0x%x\n",
-               default_calib.event_trigger,
-               default_calib.flow_trigger);
-}
-
 static int iwl_set_default_calibrations(struct iwl_mvm *mvm)
 {
        u8 cmd_raw[16]; /* holds the variable size commands */
@@ -446,8 +321,10 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
        ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
        WARN_ON(ret);
 
-       /* Override the calibrations from TLV and the const of fw */
-       iwl_set_default_calib_trigger(mvm);
+       /* Send TX valid antennas before triggering calibrations */
+       ret = iwl_send_tx_ant_cfg(mvm, mvm->nvm_data->valid_tx_ant);
+       if (ret)
+               goto error;
 
        /* WkP doesn't have all calibrations, need to set default values */
        if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 537711b1047865cf42fb9ed271369917c4f736fb..bdae700c769eb292223d8871b1789a46f6f13610 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -80,7 +80,8 @@
 
 #define IWL_INVALID_MAC80211_QUEUE     0xff
 #define IWL_MVM_MAX_ADDRESSES          2
-#define IWL_RSSI_OFFSET 44
+/* RSSI offset for WkP */
+#define IWL_RSSI_OFFSET 50
 
 enum iwl_mvm_tx_fifo {
        IWL_MVM_TX_FIFO_BK = 0,
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index aa59adf87db333f66b0a2e925ddd66afd574ae60..d0f9c1e0475e71659576b609b8f3365196435f02 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -624,12 +624,8 @@ static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
        ieee80211_free_txskb(mvm->hw, skb);
 }
 
-static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
+static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
 {
-       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-
-       iwl_mvm_dump_nic_error_log(mvm);
-
        iwl_abort_notification_waits(&mvm->notif_wait);
 
        /*
@@ -663,9 +659,21 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
        }
 }
 
+static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
+{
+       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+       iwl_mvm_dump_nic_error_log(mvm);
+
+       iwl_mvm_nic_restart(mvm);
+}
+
 static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
 {
+       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
        WARN_ON(1);
+       iwl_mvm_nic_restart(mvm);
 }
 
 static const struct iwl_op_mode_ops iwl_mvm_ops = {
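Both error paths now funnel into iwl_mvm_nic_restart(); in the ops table that follows, the two callbacks are wired up roughly as sketched here (member names inferred from the handlers above):

    .nic_error      = iwl_mvm_nic_error,      /* dump error log, then restart */
    .cmd_queue_full = iwl_mvm_cmd_queue_full, /* warn, then restart */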
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 3f40ab05bbd8181d55d4e1999ebc77f5f977b6f9..b0b190d0ec23659279f9a33e7d57b21428114f30 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -131,33 +131,42 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
 static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
                             struct iwl_rx_phy_info *phy_info)
 {
-       u32 rssi_a, rssi_b, rssi_c, max_rssi, agc_db;
+       int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
+       int rssi_all_band_a, rssi_all_band_b;
+       u32 agc_a, agc_b, max_agc;
        u32 val;
 
-       /* Find max rssi among 3 possible receivers.
+       /* Find max rssi among 2 possible receivers.
         * These values are measured by the Digital Signal Processor (DSP).
         * They should stay fairly constant even as the signal strength varies,
         * if the radio's Automatic Gain Control (AGC) is working right.
         * AGC value (see below) will provide the "interesting" info.
         */
+       val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
+       agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
+       agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
+       max_agc = max_t(u32, agc_a, agc_b);
+
        val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
        rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
        rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
-       val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_C_IDX]);
-       rssi_c = (val & IWL_OFDM_RSSI_INBAND_C_MSK) >> IWL_OFDM_RSSI_C_POS;
-
-       val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
-       agc_db = (val & IWL_OFDM_AGC_DB_MSK) >> IWL_OFDM_AGC_DB_POS;
+       rssi_all_band_a = (val & IWL_OFDM_RSSI_ALLBAND_A_MSK) >>
+                               IWL_OFDM_RSSI_ALLBAND_A_POS;
+       rssi_all_band_b = (val & IWL_OFDM_RSSI_ALLBAND_B_MSK) >>
+                               IWL_OFDM_RSSI_ALLBAND_B_POS;
 
-       max_rssi = max_t(u32, rssi_a, rssi_b);
-       max_rssi = max_t(u32, max_rssi, rssi_c);
+       /*
+        * dBm = rssi dB - agc dB - constant.
+        * Higher AGC (higher radio gain) means lower signal.
+        */
+       rssi_a_dbm = rssi_a - IWL_RSSI_OFFSET - agc_a;
+       rssi_b_dbm = rssi_b - IWL_RSSI_OFFSET - agc_b;
+       max_rssi_dbm = max_t(int, rssi_a_dbm, rssi_b_dbm);
 
-       IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
-                       rssi_a, rssi_b, rssi_c, max_rssi, agc_db);
+       IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
+                       rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
 
-       /* dBm = max_rssi dB - agc dB - constant.
-        * Higher AGC (higher radio gain) means lower signal. */
-       return max_rssi - agc_db - IWL_RSSI_OFFSET;
+       return max_rssi_dbm;
 }
 
 /*
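A worked instance of the new per-chain conversion, using IWL_RSSI_OFFSET = 50 from mvm.h (input values invented):

    rssi_a = 65, agc_a = 40  ->  65 - 50 - 40 = -25 dBm
    rssi_b = 58, agc_b = 45  ->  58 - 50 - 45 = -37 dBm
    max_rssi_dbm = max(-25, -37) = -25 dBm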
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 861a7f9f8e7f4c0a932e4ce71e607bdc8a3b94fa..274f44e2ef60e52d1fb39f66735c98563036449e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -770,6 +770,16 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        u16 txq_id;
        int err;
 
+
+       /*
+        * If mac80211 is cleaning its state, then say that we finished since
+        * our state has been cleared anyway.
+        */
+       if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+               ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+               return 0;
+       }
+
        spin_lock_bh(&mvmsta->lock);
 
        txq_id = tid_data->txq_id;
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 6b67ce3f679ceb8ad4907c14c32926d0828f9f75..6645efe5c03edfdc3c4523f0c06ab1d7bc0c7629 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -607,12 +607,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 
                /* Single frame failure in an AMPDU queue => send BAR */
                if (txq_id >= IWL_FIRST_AMPDU_QUEUE &&
-                   !(info->flags & IEEE80211_TX_STAT_ACK)) {
-                       /* there must be only one skb in the skb_list */
-                       WARN_ON_ONCE(skb_freed > 1 ||
-                                    !skb_queue_empty(&skbs));
+                   !(info->flags & IEEE80211_TX_STAT_ACK))
                        info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
-               }
 
                /* W/A FW bug: seq_ctl is wrong when the queue is flushed */
                if (status == TX_STATUS_FAIL_FIFO_FLUSHED) {
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 3d62e8055352b667ee4a776f4d218d7d4d3431ee..148843e7f34f26f9ea98d8ae269e366765cba115 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -137,10 +137,6 @@ static inline int iwl_queue_dec_wrap(int index, int n_bd)
 struct iwl_cmd_meta {
        /* only for SYNC commands, iff the reply skb is wanted */
        struct iwl_host_cmd *source;
-
-       DEFINE_DMA_UNMAP_ADDR(mapping);
-       DEFINE_DMA_UNMAP_LEN(len);
-
        u32 flags;
 };
 
@@ -185,25 +181,36 @@ struct iwl_queue {
 /*
  * The FH will write back to the first TB only, so we need
  * to copy some data into the buffer regardless of whether
- * it should be mapped or not. This indicates how much to
- * copy, even for HCMDs it must be big enough to fit the
- * DRAM scratch from the TX cmd, at least 16 bytes.
+ * it should be mapped or not. This indicates how big the
+ * first TB must be to include the scratch buffer. Since
+ * the scratch is 4 bytes at offset 12, it's 16 now. If we
+ * make it bigger then allocations will be bigger and copy
+ * slower, so that's probably not useful.
  */
-#define IWL_HCMD_MIN_COPY_SIZE 16
+#define IWL_HCMD_SCRATCHBUF_SIZE       16
 
 struct iwl_pcie_txq_entry {
        struct iwl_device_cmd *cmd;
-       struct iwl_device_cmd *copy_cmd;
        struct sk_buff *skb;
        /* buffer to free after command completes */
        const void *free_buf;
        struct iwl_cmd_meta meta;
 };
 
+struct iwl_pcie_txq_scratch_buf {
+       struct iwl_cmd_header hdr;
+       u8 buf[8];
+       __le32 scratch;
+};
+
 /**
  * struct iwl_txq - Tx Queue for DMA
  * @q: generic Rx/Tx queue descriptor
  * @tfds: transmit frame descriptors (DMA memory)
+ * @scratchbufs: start of command headers, including scratch buffers, for
+ *     the writeback -- this is DMA memory and an array holding one buffer
+ *     for each command on the queue
+ * @scratchbufs_dma: DMA address for the scratchbufs start
  * @entries: transmit entries (driver state)
  * @lock: queue lock
  * @stuck_timer: timer that fires if queue gets stuck
@@ -217,6 +224,8 @@ struct iwl_pcie_txq_entry {
 struct iwl_txq {
        struct iwl_queue q;
        struct iwl_tfd *tfds;
+       struct iwl_pcie_txq_scratch_buf *scratchbufs;
+       dma_addr_t scratchbufs_dma;
        struct iwl_pcie_txq_entry *entries;
        spinlock_t lock;
        struct timer_list stuck_timer;
@@ -225,6 +234,13 @@ struct iwl_txq {
        u8 active;
 };
 
+static inline dma_addr_t
+iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
+{
+       return txq->scratchbufs_dma +
+              sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
+}
+
 /**
  * struct iwl_trans_pcie - PCIe transport specific data
  * @rxq: all the RX queue data
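The 16-byte size falls out of the struct layout: a 4-byte iwl_cmd_header, 8 bytes of buffer, then the 4-byte scratch word at offset 12. The BUILD_BUG_ONs added in pcie/tx.c pin this down; an equivalent standalone check (a sketch, not part of the patch):

    BUILD_BUG_ON(sizeof(struct iwl_pcie_txq_scratch_buf) !=
                 IWL_HCMD_SCRATCHBUF_SIZE);            /* 4 + 8 + 4 == 16 */
    BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) != 12);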
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index b0ae06d2456f1c388ba47fcdbb88f74063ea1162..567e67ad1f617682cbba7a90f2ca95e53eb07eea 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -637,22 +637,14 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                index = SEQ_TO_INDEX(sequence);
                cmd_index = get_cmd_index(&txq->q, index);
 
-               if (reclaim) {
-                       struct iwl_pcie_txq_entry *ent;
-                       ent = &txq->entries[cmd_index];
-                       cmd = ent->copy_cmd;
-                       WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
-               } else {
+               if (reclaim)
+                       cmd = txq->entries[cmd_index].cmd;
+               else
                        cmd = NULL;
-               }
 
                err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
 
                if (reclaim) {
-                       /* The original command isn't needed any more */
-                       kfree(txq->entries[cmd_index].copy_cmd);
-                       txq->entries[cmd_index].copy_cmd = NULL;
-                       /* nor is the duplicated part of the command */
                        kfree(txq->entries[cmd_index].free_buf);
                        txq->entries[cmd_index].free_buf = NULL;
                }
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 8b625a7f56859a32523e73f1e502d967f65d8aea..8595c16f74deb8bdcf531725e3c65c0986d58745 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -191,12 +191,9 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
        }
 
        for (i = q->read_ptr; i != q->write_ptr;
-            i = iwl_queue_inc_wrap(i, q->n_bd)) {
-               struct iwl_tx_cmd *tx_cmd =
-                       (struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
+            i = iwl_queue_inc_wrap(i, q->n_bd))
                IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
-                       get_unaligned_le32(&tx_cmd->scratch));
-       }
+                       le32_to_cpu(txq->scratchbufs[i].scratch));
 
        iwl_op_mode_nic_error(trans->op_mode);
 }
@@ -367,8 +364,8 @@ static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
 }
 
 static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
-                              struct iwl_cmd_meta *meta, struct iwl_tfd *tfd,
-                              enum dma_data_direction dma_dir)
+                              struct iwl_cmd_meta *meta,
+                              struct iwl_tfd *tfd)
 {
        int i;
        int num_tbs;
@@ -382,17 +379,12 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
                return;
        }
 
-       /* Unmap tx_cmd */
-       if (num_tbs)
-               dma_unmap_single(trans->dev,
-                               dma_unmap_addr(meta, mapping),
-                               dma_unmap_len(meta, len),
-                               DMA_BIDIRECTIONAL);
+       /* first TB is never freed - it's the scratchbuf data */
 
-       /* Unmap chunks, if any. */
        for (i = 1; i < num_tbs; i++)
                dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
-                                iwl_pcie_tfd_tb_get_len(tfd, i), dma_dir);
+                                iwl_pcie_tfd_tb_get_len(tfd, i),
+                                DMA_TO_DEVICE);
 
        tfd->num_tbs = 0;
 }
@@ -406,8 +398,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
  * Does NOT advance any TFD circular buffer read/write indexes
  * Does NOT free the TFD itself (which is within circular buffer)
  */
-static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
-                                 enum dma_data_direction dma_dir)
+static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 {
        struct iwl_tfd *tfd_tmp = txq->tfds;
 
@@ -418,8 +409,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
        lockdep_assert_held(&txq->lock);
 
        /* We have only q->n_window txq->entries, but we use q->n_bd tfds */
-       iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
-                          dma_dir);
+       iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
 
        /* free SKB */
        if (txq->entries) {
@@ -479,6 +469,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
+       size_t scratchbuf_sz;
        int i;
 
        if (WARN_ON(txq->entries || txq->tfds))
@@ -514,9 +505,25 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
                IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
                goto error;
        }
+
+       BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
+       BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
+                       sizeof(struct iwl_cmd_header) +
+                       offsetof(struct iwl_tx_cmd, scratch));
+
+       scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;
+
+       txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
+                                             &txq->scratchbufs_dma,
+                                             GFP_KERNEL);
+       if (!txq->scratchbufs)
+               goto err_free_tfds;
+
        txq->q.id = txq_id;
 
        return 0;
+err_free_tfds:
+       dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
 error:
        if (txq->entries && txq_id == trans_pcie->cmd_queue)
                for (i = 0; i < slots_num; i++)
@@ -565,22 +572,13 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq = &trans_pcie->txq[txq_id];
        struct iwl_queue *q = &txq->q;
-       enum dma_data_direction dma_dir;
 
        if (!q->n_bd)
                return;
 
-       /* In the command queue, all the TBs are mapped as BIDI
-        * so unmap them as such.
-        */
-       if (txq_id == trans_pcie->cmd_queue)
-               dma_dir = DMA_BIDIRECTIONAL;
-       else
-               dma_dir = DMA_TO_DEVICE;
-
        spin_lock_bh(&txq->lock);
        while (q->write_ptr != q->read_ptr) {
-               iwl_pcie_txq_free_tfd(trans, txq, dma_dir);
+               iwl_pcie_txq_free_tfd(trans, txq);
                q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
        }
        spin_unlock_bh(&txq->lock);
@@ -610,7 +608,6 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
        if (txq_id == trans_pcie->cmd_queue)
                for (i = 0; i < txq->q.n_window; i++) {
                        kfree(txq->entries[i].cmd);
-                       kfree(txq->entries[i].copy_cmd);
                        kfree(txq->entries[i].free_buf);
                }
 
@@ -619,6 +616,10 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
                dma_free_coherent(dev, sizeof(struct iwl_tfd) *
                                  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
                txq->q.dma_addr = 0;
+
+               dma_free_coherent(dev,
+                                 sizeof(*txq->scratchbufs) * txq->q.n_window,
+                                 txq->scratchbufs, txq->scratchbufs_dma);
        }
 
        kfree(txq->entries);
@@ -962,7 +963,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
                iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
 
-               iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
+               iwl_pcie_txq_free_tfd(trans, txq);
        }
 
        iwl_pcie_txq_progress(trans_pcie, txq);
@@ -1152,29 +1153,29 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
        void *dup_buf = NULL;
        dma_addr_t phys_addr;
        int idx;
-       u16 copy_size, cmd_size, dma_size;
+       u16 copy_size, cmd_size, scratch_size;
        bool had_nocopy = false;
        int i;
        u32 cmd_pos;
-       const u8 *cmddata[IWL_MAX_CMD_TFDS];
-       u16 cmdlen[IWL_MAX_CMD_TFDS];
+       const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
+       u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
 
        copy_size = sizeof(out_cmd->hdr);
        cmd_size = sizeof(out_cmd->hdr);
 
        /* need one for the header if the first is NOCOPY */
-       BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
+       BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
 
-       for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+       for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                cmddata[i] = cmd->data[i];
                cmdlen[i] = cmd->len[i];
 
                if (!cmd->len[i])
                        continue;
 
-               /* need at least IWL_HCMD_MIN_COPY_SIZE copied */
-               if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
-                       int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
+               /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
+               if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
+                       int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
 
                        if (copy > cmdlen[i])
                                copy = cmdlen[i];
@@ -1260,15 +1261,15 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
        /* and copy the data that needs to be copied */
        cmd_pos = offsetof(struct iwl_device_cmd, payload);
        copy_size = sizeof(out_cmd->hdr);
-       for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+       for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                int copy = 0;
 
                if (!cmd->len)
                        continue;
 
-               /* need at least IWL_HCMD_MIN_COPY_SIZE copied */
-               if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
-                       copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
+               /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
+               if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
+                       copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
 
                        if (copy > cmd->len[i])
                                copy = cmd->len[i];
@@ -1286,50 +1287,38 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                }
        }
 
-       WARN_ON_ONCE(txq->entries[idx].copy_cmd);
-
-       /*
-        * since out_cmd will be the source address of the FH, it will write
-        * the retry count there. So when the user needs to receivce the HCMD
-        * that corresponds to the response in the response handler, it needs
-        * to set CMD_WANT_HCMD.
-        */
-       if (cmd->flags & CMD_WANT_HCMD) {
-               txq->entries[idx].copy_cmd =
-                       kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
-               if (unlikely(!txq->entries[idx].copy_cmd)) {
-                       idx = -ENOMEM;
-                       goto out;
-               }
-       }
-
        IWL_DEBUG_HC(trans,
                     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
                     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
                     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
                     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
 
-       /*
-        * If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must
-        * still map at least that many bytes for the hardware to write back to.
-        * We have enough space, so that's not a problem.
-        */
-       dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE);
+       /* start the TFD with the scratchbuf */
+       scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
+       memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
+       iwl_pcie_txq_build_tfd(trans, txq,
+                              iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
+                              scratch_size, 1);
+
+       /* map first command fragment, if any remains */
+       if (copy_size > scratch_size) {
+               phys_addr = dma_map_single(trans->dev,
+                                          ((u8 *)&out_cmd->hdr) + scratch_size,
+                                          copy_size - scratch_size,
+                                          DMA_TO_DEVICE);
+               if (dma_mapping_error(trans->dev, phys_addr)) {
+                       iwl_pcie_tfd_unmap(trans, out_meta,
+                                          &txq->tfds[q->write_ptr]);
+                       idx = -ENOMEM;
+                       goto out;
+               }
 
-       phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, dma_size,
-                                  DMA_BIDIRECTIONAL);
-       if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
-               idx = -ENOMEM;
-               goto out;
+               iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
+                                      copy_size - scratch_size, 0);
        }
 
-       dma_unmap_addr_set(out_meta, mapping, phys_addr);
-       dma_unmap_len_set(out_meta, len, dma_size);
-
-       iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);
-
        /* map the remaining (adjusted) nocopy/dup fragments */
-       for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+       for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                const void *data = cmddata[i];
 
                if (!cmdlen[i])
@@ -1340,11 +1329,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
                        data = dup_buf;
                phys_addr = dma_map_single(trans->dev, (void *)data,
-                                          cmdlen[i], DMA_BIDIRECTIONAL);
+                                          cmdlen[i], DMA_TO_DEVICE);
                if (dma_mapping_error(trans->dev, phys_addr)) {
                        iwl_pcie_tfd_unmap(trans, out_meta,
-                                          &txq->tfds[q->write_ptr],
-                                          DMA_BIDIRECTIONAL);
+                                          &txq->tfds[q->write_ptr]);
                        idx = -ENOMEM;
                        goto out;
                }
@@ -1418,7 +1406,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
        cmd = txq->entries[cmd_index].cmd;
        meta = &txq->entries[cmd_index].meta;
 
-       iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
+       iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
 
        /* Input error checking is done when commands are added to queue. */
        if (meta->flags & CMD_WANT_SKB) {
@@ -1597,10 +1585,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        struct iwl_cmd_meta *out_meta;
        struct iwl_txq *txq;
        struct iwl_queue *q;
-       dma_addr_t phys_addr = 0;
-       dma_addr_t txcmd_phys;
-       dma_addr_t scratch_phys;
-       u16 len, firstlen, secondlen;
+       dma_addr_t tb0_phys, tb1_phys, scratch_phys;
+       void *tb1_addr;
+       u16 len, tb1_len, tb2_len;
        u8 wait_write_ptr = 0;
        __le16 fc = hdr->frame_control;
        u8 hdr_len = ieee80211_hdrlen(fc);
@@ -1638,85 +1625,80 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
                            INDEX_TO_SEQ(q->write_ptr)));
 
+       tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
+       scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
+                      offsetof(struct iwl_tx_cmd, scratch);
+
+       tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+       tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
+
        /* Set up first empty entry in queue's array of Tx/cmd buffers */
        out_meta = &txq->entries[q->write_ptr].meta;
 
        /*
-        * Use the first empty entry in this queue's command buffer array
-        * to contain the Tx command and MAC header concatenated together
-        * (payload data will be in another buffer).
-        * Size of this varies, due to varying MAC header length.
-        * If end is not dword aligned, we'll have 2 extra bytes at the end
-        * of the MAC header (device reads on dword boundaries).
-        * We'll tell device about this padding later.
+        * The second TB (tb1) points to the remainder of the TX command
+        * and the 802.11 header - dword aligned size
+        * (This calculation modifies the TX command, so do it before the
+        * setup of the first TB)
         */
-       len = sizeof(struct iwl_tx_cmd) +
-               sizeof(struct iwl_cmd_header) + hdr_len;
-       firstlen = (len + 3) & ~3;
+       len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
+             hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
+       tb1_len = (len + 3) & ~3;
 
        /* Tell NIC about any 2-byte padding after MAC header */
-       if (firstlen != len)
+       if (tb1_len != len)
                tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
 
-       /* Physical address of this Tx command's header (not MAC header!),
-        * within command buffer array. */
-       txcmd_phys = dma_map_single(trans->dev,
-                                   &dev_cmd->hdr, firstlen,
-                                   DMA_BIDIRECTIONAL);
-       if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
-               goto out_err;
-       dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
-       dma_unmap_len_set(out_meta, len, firstlen);
+       /* The first TB points to the scratchbuf data - min_copy bytes */
+       memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
+              IWL_HCMD_SCRATCHBUF_SIZE);
+       iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
+                              IWL_HCMD_SCRATCHBUF_SIZE, 1);
 
-       if (!ieee80211_has_morefrags(fc)) {
-               txq->need_update = 1;
-       } else {
-               wait_write_ptr = 1;
-               txq->need_update = 0;
-       }
+       /* there must be data left over for TB1 or this code must be changed */
+       BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);
 
-       /* Set up TFD's 2nd entry to point directly to remainder of skb,
-        * if any (802.11 null frames have no payload). */
-       secondlen = skb->len - hdr_len;
-       if (secondlen > 0) {
-               phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
-                                          secondlen, DMA_TO_DEVICE);
-               if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
-                       dma_unmap_single(trans->dev,
-                                        dma_unmap_addr(out_meta, mapping),
-                                        dma_unmap_len(out_meta, len),
-                                        DMA_BIDIRECTIONAL);
+       /* map the data for TB1 */
+       tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE;
+       tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
+               goto out_err;
+       iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, 0);
+
+       /*
+        * Set up TFD's third entry to point directly to remainder
+        * of skb, if any (802.11 null frames have no payload).
+        */
+       tb2_len = skb->len - hdr_len;
+       if (tb2_len > 0) {
+               dma_addr_t tb2_phys = dma_map_single(trans->dev,
+                                                    skb->data + hdr_len,
+                                                    tb2_len, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
+                       iwl_pcie_tfd_unmap(trans, out_meta,
+                                          &txq->tfds[q->write_ptr]);
                        goto out_err;
                }
+               iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, 0);
        }
 
-       /* Attach buffers to TFD */
-       iwl_pcie_txq_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
-       if (secondlen > 0)
-               iwl_pcie_txq_build_tfd(trans, txq, phys_addr, secondlen, 0);
-
-       scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
-                               offsetof(struct iwl_tx_cmd, scratch);
-
-       /* take back ownership of DMA buffer to enable update */
-       dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
-                               DMA_BIDIRECTIONAL);
-       tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
-       tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
-
        /* Set up entry for this TFD in Tx byte-count array */
        iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 
-       dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
-                                  DMA_BIDIRECTIONAL);
-
        trace_iwlwifi_dev_tx(trans->dev, skb,
                             &txq->tfds[txq->q.write_ptr],
                             sizeof(struct iwl_tfd),
-                            &dev_cmd->hdr, firstlen,
-                            skb->data + hdr_len, secondlen);
+                            &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
+                            skb->data + hdr_len, tb2_len);
        trace_iwlwifi_dev_tx_data(trans->dev, skb,
-                                 skb->data + hdr_len, secondlen);
+                                 skb->data + hdr_len, tb2_len);
+
+       if (!ieee80211_has_morefrags(fc)) {
+               txq->need_update = 1;
+       } else {
+               wait_write_ptr = 1;
+               txq->need_update = 0;
+       }
 
        /* start timer if queue currently empty */
        if (txq->need_update && q->read_ptr == q->write_ptr &&
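Putting the reworked data path together, the three TBs of a frame's TFD now carve up the buffers as follows (a sketch; sizes left symbolic, hdr_len = 24 chosen as a typical three-address header):

    /* TB0: first 16 bytes (the scratchbuf copy): iwl_cmd_header plus the
     *      start of iwl_tx_cmd, which carries the DRAM scratch pointer
     *      the FH writes back to.
     * TB1: remainder of iwl_tx_cmd plus the 802.11 header, dword aligned:
     *      len     = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header)
     *                + 24 - IWL_HCMD_SCRATCHBUF_SIZE;
     *      tb1_len = (len + 3) & ~3;
     * TB2: skb->len - hdr_len bytes of payload, mapped DMA_TO_DEVICE
     *      (absent for 802.11 null frames).
     */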
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 808f5fcd1ced92127f8786700a759465e0708060..fb306814576affa7e636fa53353d29ba1e39b668 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -3290,14 +3290,19 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
        int ret = -ENODATA;
 
        rcu_read_lock();
-       if (local->use_chanctx) {
-               chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
-               if (chanctx_conf) {
-                       *chandef = chanctx_conf->def;
-                       ret = 0;
-               }
-       } else if (local->open_count == local->monitors) {
-               *chandef = local->monitor_chandef;
+       chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+       if (chanctx_conf) {
+               *chandef = chanctx_conf->def;
+               ret = 0;
+       } else if (local->open_count > 0 &&
+                  local->open_count == local->monitors &&
+                  sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+               if (local->use_chanctx)
+                       *chandef = local->monitor_chandef;
+               else
+                       cfg80211_chandef_create(chandef,
+                                               local->_oper_channel,
+                                               local->_oper_channel_type);
                ret = 0;
        }
        rcu_read_unlock();
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 640afab304d7a02e50414a16f3df8e22c6ea4c30..baaa8608e52de8d9d504f14a254bb7e6631b5f2e 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -541,6 +541,9 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 
                ieee80211_adjust_monitor_flags(sdata, 1);
                ieee80211_configure_filter(local);
+               mutex_lock(&local->mtx);
+               ieee80211_recalc_idle(local);
+               mutex_unlock(&local->mtx);
 
                netif_carrier_on(dev);
                break;
@@ -812,6 +815,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 
                ieee80211_adjust_monitor_flags(sdata, -1);
                ieee80211_configure_filter(local);
+               mutex_lock(&local->mtx);
+               ieee80211_recalc_idle(local);
+               mutex_unlock(&local->mtx);
                break;
        case NL80211_IFTYPE_P2P_DEVICE:
                /* relies on synchronize_rcu() below */
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 9f6464f3e05f94e0025c4449b4a29e35bc185725..141577412d8407fc8b18ad354a34ad8de105f154 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -647,6 +647,9 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
                our_mcs = (le16_to_cpu(vht_cap.vht_mcs.rx_mcs_map) &
                                                                mask) >> shift;
 
+               if (our_mcs == IEEE80211_VHT_MCS_NOT_SUPPORTED)
+                       continue;
+
                switch (ap_mcs) {
                default:
                        if (our_mcs <= ap_mcs)
@@ -3502,6 +3505,14 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
+       /*
+        * Stop timers before deleting work items, as timers
+        * could race and re-add the work-items. They will be
+        * re-established on connection.
+        */
+       del_timer_sync(&ifmgd->conn_mon_timer);
+       del_timer_sync(&ifmgd->bcn_mon_timer);
+
        /*
         * we need to use atomic bitops for the running bits
         * only because both timers might fire at the same
@@ -3516,13 +3527,9 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
        if (del_timer_sync(&ifmgd->timer))
                set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
 
-       cancel_work_sync(&ifmgd->chswitch_work);
        if (del_timer_sync(&ifmgd->chswitch_timer))
                set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
-
-       /* these will just be re-established on connection */
-       del_timer_sync(&ifmgd->conn_mon_timer);
-       del_timer_sync(&ifmgd->bcn_mon_timer);
+       cancel_work_sync(&ifmgd->chswitch_work);
 }
 
 void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
@@ -4315,6 +4322,17 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
+       /*
+        * Make sure some work items will not run after this,
+        * they will not do anything but might not have been
+        * cancelled when disconnecting.
+        */
+       cancel_work_sync(&ifmgd->monitor_work);
+       cancel_work_sync(&ifmgd->beacon_connection_loss_work);
+       cancel_work_sync(&ifmgd->request_smps_work);
+       cancel_work_sync(&ifmgd->csa_connection_drop_work);
+       cancel_work_sync(&ifmgd->chswitch_work);
+
        mutex_lock(&ifmgd->mtx);
        if (ifmgd->assoc_data)
                ieee80211_destroy_assoc_data(sdata, false);
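The reordering in ieee80211_sta_quiesce() matters because these timers queue the very work items being cancelled: cancelling the work first would let a still-armed timer re-add it. The shape of the race, assuming a handler along the lines of mac80211's connection-monitor timer:

    static void conn_mon_timer_fn(unsigned long data)
    {
            struct ieee80211_sub_if_data *sdata = (void *)data;

            /* re-queues monitor_work -- hence timers must be stopped first */
            ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.monitor_work);
    }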
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index ce78d1149f1dc2e873eb4291ce9459b40a89b4db..8914d2d2881aab77c5741479d6dbf516db59e3b3 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2745,7 +2745,8 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
                                cpu_to_le16(IEEE80211_FCTL_MOREDATA);
                }
 
-               sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
+               if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+                       sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
                if (!ieee80211_tx_prepare(sdata, &tx, skb))
                        break;
                dev_kfree_skb_any(skb);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 5ffff039b0174eefb8f3967dc6f5307ea7cb2f8c..ea4155fe97334f91794dbb7a55f0dab28219c2a3 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -367,8 +367,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
        rdev->wiphy.rts_threshold = (u32) -1;
        rdev->wiphy.coverage_class = 0;
 
-       rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH |
-                              NL80211_FEATURE_ADVERTISE_CHAN_LIMITS;
+       rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH;
 
        return &rdev->wiphy;
 }
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index e652d05ff712714f597857b225b432aafea1c34c..d44ab216c0ecd8b01d4386edf62cd07c3f2071db 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -557,18 +557,6 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
        if ((chan->flags & IEEE80211_CHAN_RADAR) &&
            nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
                goto nla_put_failure;
-       if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) &&
-           nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS))
-               goto nla_put_failure;
-       if ((chan->flags & IEEE80211_CHAN_NO_HT40PLUS) &&
-           nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_PLUS))
-               goto nla_put_failure;
-       if ((chan->flags & IEEE80211_CHAN_NO_80MHZ) &&
-           nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_80MHZ))
-               goto nla_put_failure;
-       if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) &&
-           nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ))
-               goto nla_put_failure;
 
        if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
                        DBM_TO_MBM(chan->max_power)))
@@ -1310,15 +1298,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
                        dev->wiphy.max_acl_mac_addrs))
                goto nla_put_failure;
 
-       if (dev->wiphy.extended_capabilities &&
-           (nla_put(msg, NL80211_ATTR_EXT_CAPA,
-                    dev->wiphy.extended_capabilities_len,
-                    dev->wiphy.extended_capabilities) ||
-            nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK,
-                    dev->wiphy.extended_capabilities_len,
-                    dev->wiphy.extended_capabilities_mask)))
-               goto nla_put_failure;
-
        return genlmsg_end(msg, hdr);
 
  nla_put_failure:
@@ -1328,7 +1307,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
 
 static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
 {
-       int idx = 0;
+       int idx = 0, ret;
        int start = cb->args[0];
        struct cfg80211_registered_device *dev;
 
@@ -1338,9 +1317,29 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
                        continue;
                if (++idx <= start)
                        continue;
-               if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid,
-                                      cb->nlh->nlmsg_seq, NLM_F_MULTI,
-                                      dev) < 0) {
+               ret = nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid,
+                                        cb->nlh->nlmsg_seq, NLM_F_MULTI,
+                                        dev);
+               if (ret < 0) {
+                       /*
+                        * If sending the wiphy data didn't fit (ENOBUFS or
+                        * EMSGSIZE returned), this SKB is still empty (so
+                        * it's not too big because another wiphy dataset is
+                        * already in the skb) and we've not tried to adjust
+                        * the dump allocation yet ... then adjust the alloc
+                        * size to be bigger, and return 1 but with the empty
+                        * skb. This results in an empty message being RX'ed
+                        * in userspace, but that is ignored.
+                        *
+                        * We can then retry with the larger buffer.
+                        */
+                       if ((ret == -ENOBUFS || ret == -EMSGSIZE) &&
+                           !skb->len &&
+                           cb->min_dump_alloc < 4096) {
+                               cb->min_dump_alloc = 4096;
+                               mutex_unlock(&cfg80211_mutex);
+                               return 1;
+                       }
                        idx--;
                        break;
                }
@@ -1357,7 +1356,7 @@ static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info)
        struct sk_buff *msg;
        struct cfg80211_registered_device *dev = info->user_ptr[0];
 
-       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       msg = nlmsg_new(4096, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
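The 4096 here matches the min_dump_alloc bump in the dump path above: once a single wiphy no longer fits the default netlink allocation, the dump returns 1 with an empty skb after raising cb->min_dump_alloc, netlink re-enters the dump with a 4096-byte skb and the wiphy is retried; the non-dump GET path simply allocates the larger message up front.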