1 // SPDX-License-Identifier: BSD-3-Clause-Clear
3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
5 #include <linux/skbuff.h>
6 #include <linux/ctype.h>
7 #include <net/mac80211.h>
8 #include <net/cfg80211.h>
9 #include <linux/completion.h>
10 #include <linux/if_ether.h>
11 #include <linux/types.h>
12 #include <linux/pci.h>
13 #include <linux/uuid.h>
14 #include <linux/time.h>
22 struct wmi_tlv_policy
{
26 struct wmi_tlv_svc_ready_parse
{
27 bool wmi_svc_bitmap_done
;
30 struct wmi_tlv_dma_ring_caps_parse
{
31 struct wmi_dma_ring_capabilities
*dma_ring_caps
;
35 struct wmi_tlv_svc_rdy_ext_parse
{
36 struct ath11k_service_ext_param param
;
37 struct wmi_soc_mac_phy_hw_mode_caps
*hw_caps
;
38 struct wmi_hw_mode_capabilities
*hw_mode_caps
;
41 struct wmi_hw_mode_capabilities pref_hw_mode_caps
;
42 struct wmi_mac_phy_capabilities
*mac_phy_caps
;
44 struct wmi_soc_hal_reg_capabilities
*soc_hal_reg_caps
;
45 struct wmi_hal_reg_capabilities_ext
*ext_hal_reg_caps
;
46 u32 n_ext_hal_reg_caps
;
47 struct wmi_tlv_dma_ring_caps_parse dma_caps_parse
;
50 bool ext_hal_reg_done
;
51 bool mac_phy_chainmask_combo_done
;
52 bool mac_phy_chainmask_cap_done
;
53 bool oem_dma_ring_cap_done
;
54 bool dma_ring_cap_done
;
57 struct wmi_tlv_svc_rdy_ext2_parse
{
58 struct wmi_tlv_dma_ring_caps_parse dma_caps_parse
;
59 bool dma_ring_cap_done
;
62 struct wmi_tlv_rdy_parse
{
63 u32 num_extra_mac_addr
;
66 struct wmi_tlv_dma_buf_release_parse
{
67 struct ath11k_wmi_dma_buf_release_fixed_param fixed
;
68 struct wmi_dma_buf_release_entry
*buf_entry
;
69 struct wmi_dma_buf_release_meta_data
*meta_data
;
76 static const struct wmi_tlv_policy wmi_tlv_policies
[] = {
79 [WMI_TAG_ARRAY_UINT32
]
81 [WMI_TAG_SERVICE_READY_EVENT
]
82 = { .min_len
= sizeof(struct wmi_service_ready_event
) },
83 [WMI_TAG_SERVICE_READY_EXT_EVENT
]
84 = { .min_len
= sizeof(struct wmi_service_ready_ext_event
) },
85 [WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS
]
86 = { .min_len
= sizeof(struct wmi_soc_mac_phy_hw_mode_caps
) },
87 [WMI_TAG_SOC_HAL_REG_CAPABILITIES
]
88 = { .min_len
= sizeof(struct wmi_soc_hal_reg_capabilities
) },
89 [WMI_TAG_VDEV_START_RESPONSE_EVENT
]
90 = { .min_len
= sizeof(struct wmi_vdev_start_resp_event
) },
91 [WMI_TAG_PEER_DELETE_RESP_EVENT
]
92 = { .min_len
= sizeof(struct wmi_peer_delete_resp_event
) },
93 [WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT
]
94 = { .min_len
= sizeof(struct wmi_bcn_tx_status_event
) },
95 [WMI_TAG_VDEV_STOPPED_EVENT
]
96 = { .min_len
= sizeof(struct wmi_vdev_stopped_event
) },
97 [WMI_TAG_REG_CHAN_LIST_CC_EVENT
]
98 = { .min_len
= sizeof(struct wmi_reg_chan_list_cc_event
) },
100 = { .min_len
= sizeof(struct wmi_mgmt_rx_hdr
) },
101 [WMI_TAG_MGMT_TX_COMPL_EVENT
]
102 = { .min_len
= sizeof(struct wmi_mgmt_tx_compl_event
) },
104 = { .min_len
= sizeof(struct wmi_scan_event
) },
105 [WMI_TAG_PEER_STA_KICKOUT_EVENT
]
106 = { .min_len
= sizeof(struct wmi_peer_sta_kickout_event
) },
108 = { .min_len
= sizeof(struct wmi_roam_event
) },
109 [WMI_TAG_CHAN_INFO_EVENT
]
110 = { .min_len
= sizeof(struct wmi_chan_info_event
) },
111 [WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT
]
112 = { .min_len
= sizeof(struct wmi_pdev_bss_chan_info_event
) },
113 [WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT
]
114 = { .min_len
= sizeof(struct wmi_vdev_install_key_compl_event
) },
115 [WMI_TAG_READY_EVENT
] = {
116 .min_len
= sizeof(struct wmi_ready_event_min
) },
117 [WMI_TAG_SERVICE_AVAILABLE_EVENT
]
118 = {.min_len
= sizeof(struct wmi_service_available_event
) },
119 [WMI_TAG_PEER_ASSOC_CONF_EVENT
]
120 = { .min_len
= sizeof(struct wmi_peer_assoc_conf_event
) },
121 [WMI_TAG_STATS_EVENT
]
122 = { .min_len
= sizeof(struct wmi_stats_event
) },
123 [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT
]
124 = { .min_len
= sizeof(struct wmi_pdev_ctl_failsafe_chk_event
) },
127 #define PRIMAP(_hw_mode_) \
128 [_hw_mode_] = _hw_mode_##_PRI
130 static const int ath11k_hw_mode_pri_map
[] = {
131 PRIMAP(WMI_HOST_HW_MODE_SINGLE
),
132 PRIMAP(WMI_HOST_HW_MODE_DBS
),
133 PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE
),
134 PRIMAP(WMI_HOST_HW_MODE_SBS
),
135 PRIMAP(WMI_HOST_HW_MODE_DBS_SBS
),
136 PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS
),
138 PRIMAP(WMI_HOST_HW_MODE_MAX
),
142 ath11k_wmi_tlv_iter(struct ath11k_base
*ab
, const void *ptr
, size_t len
,
143 int (*iter
)(struct ath11k_base
*ab
, u16 tag
, u16 len
,
144 const void *ptr
, void *data
),
147 const void *begin
= ptr
;
148 const struct wmi_tlv
*tlv
;
149 u16 tlv_tag
, tlv_len
;
153 if (len
< sizeof(*tlv
)) {
154 ath11k_err(ab
, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
155 ptr
- begin
, len
, sizeof(*tlv
));
160 tlv_tag
= FIELD_GET(WMI_TLV_TAG
, tlv
->header
);
161 tlv_len
= FIELD_GET(WMI_TLV_LEN
, tlv
->header
);
166 ath11k_err(ab
, "wmi tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
167 tlv_tag
, ptr
- begin
, len
, tlv_len
);
171 if (tlv_tag
< ARRAY_SIZE(wmi_tlv_policies
) &&
172 wmi_tlv_policies
[tlv_tag
].min_len
&&
173 wmi_tlv_policies
[tlv_tag
].min_len
> tlv_len
) {
174 ath11k_err(ab
, "wmi tlv parse failure of tag %hhu at byte %zd (%hhu bytes is less than min length %zu)\n",
175 tlv_tag
, ptr
- begin
, tlv_len
,
176 wmi_tlv_policies
[tlv_tag
].min_len
);
180 ret
= iter(ab
, tlv_tag
, tlv_len
, ptr
, data
);
191 static int ath11k_wmi_tlv_iter_parse(struct ath11k_base
*ab
, u16 tag
, u16 len
,
192 const void *ptr
, void *data
)
194 const void **tb
= data
;
196 if (tag
< WMI_TAG_MAX
)
202 static int ath11k_wmi_tlv_parse(struct ath11k_base
*ar
, const void **tb
,
203 const void *ptr
, size_t len
)
205 return ath11k_wmi_tlv_iter(ar
, ptr
, len
, ath11k_wmi_tlv_iter_parse
,
210 ath11k_wmi_tlv_parse_alloc(struct ath11k_base
*ab
, const void *ptr
,
211 size_t len
, gfp_t gfp
)
216 tb
= kcalloc(WMI_TAG_MAX
, sizeof(*tb
), gfp
);
218 return ERR_PTR(-ENOMEM
);
220 ret
= ath11k_wmi_tlv_parse(ab
, tb
, ptr
, len
);
229 static int ath11k_wmi_cmd_send_nowait(struct ath11k_pdev_wmi
*wmi
, struct sk_buff
*skb
,
232 struct ath11k_skb_cb
*skb_cb
= ATH11K_SKB_CB(skb
);
233 struct ath11k_base
*ab
= wmi
->wmi_ab
->ab
;
234 struct wmi_cmd_hdr
*cmd_hdr
;
238 if (skb_push(skb
, sizeof(struct wmi_cmd_hdr
)) == NULL
)
241 cmd
|= FIELD_PREP(WMI_CMD_HDR_CMD_ID
, cmd_id
);
243 cmd_hdr
= (struct wmi_cmd_hdr
*)skb
->data
;
244 cmd_hdr
->cmd_id
= cmd
;
246 memset(skb_cb
, 0, sizeof(*skb_cb
));
247 ret
= ath11k_htc_send(&ab
->htc
, wmi
->eid
, skb
);
255 skb_pull(skb
, sizeof(struct wmi_cmd_hdr
));
259 int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi
*wmi
, struct sk_buff
*skb
,
262 struct ath11k_wmi_base
*wmi_sc
= wmi
->wmi_ab
;
263 int ret
= -EOPNOTSUPP
;
267 wait_event_timeout(wmi_sc
->tx_credits_wq
, ({
268 ret
= ath11k_wmi_cmd_send_nowait(wmi
, skb
, cmd_id
);
270 if (ret
&& test_bit(ATH11K_FLAG_CRASH_FLUSH
, &wmi_sc
->ab
->dev_flags
))
274 }), WMI_SEND_TIMEOUT_HZ
);
277 ath11k_warn(wmi_sc
->ab
, "wmi command %d timeout\n", cmd_id
);
282 static int ath11k_pull_svc_ready_ext(struct ath11k_pdev_wmi
*wmi_handle
,
284 struct ath11k_service_ext_param
*param
)
286 const struct wmi_service_ready_ext_event
*ev
= ptr
;
291 /* Move this to host based bitmap */
292 param
->default_conc_scan_config_bits
= ev
->default_conc_scan_config_bits
;
293 param
->default_fw_config_bits
= ev
->default_fw_config_bits
;
294 param
->he_cap_info
= ev
->he_cap_info
;
295 param
->mpdu_density
= ev
->mpdu_density
;
296 param
->max_bssid_rx_filters
= ev
->max_bssid_rx_filters
;
297 memcpy(¶m
->ppet
, &ev
->ppet
, sizeof(param
->ppet
));
303 ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi
*wmi_handle
,
304 struct wmi_soc_mac_phy_hw_mode_caps
*hw_caps
,
305 struct wmi_hw_mode_capabilities
*wmi_hw_mode_caps
,
306 struct wmi_soc_hal_reg_capabilities
*hal_reg_caps
,
307 struct wmi_mac_phy_capabilities
*wmi_mac_phy_caps
,
308 u8 hw_mode_id
, u8 phy_id
,
309 struct ath11k_pdev
*pdev
)
311 struct wmi_mac_phy_capabilities
*mac_phy_caps
;
312 struct ath11k_band_cap
*cap_band
;
313 struct ath11k_pdev_cap
*pdev_cap
= &pdev
->cap
;
315 u32 hw_idx
, phy_idx
= 0;
317 if (!hw_caps
|| !wmi_hw_mode_caps
|| !hal_reg_caps
)
320 for (hw_idx
= 0; hw_idx
< hw_caps
->num_hw_modes
; hw_idx
++) {
321 if (hw_mode_id
== wmi_hw_mode_caps
[hw_idx
].hw_mode_id
)
324 phy_map
= wmi_hw_mode_caps
[hw_idx
].phy_id_map
;
331 if (hw_idx
== hw_caps
->num_hw_modes
)
335 if (phy_id
>= hal_reg_caps
->num_phy
)
338 mac_phy_caps
= wmi_mac_phy_caps
+ phy_idx
;
340 pdev
->pdev_id
= mac_phy_caps
->pdev_id
;
341 pdev_cap
->supported_bands
|= mac_phy_caps
->supported_bands
;
342 pdev_cap
->ampdu_density
= mac_phy_caps
->ampdu_density
;
344 /* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
345 * band to band for a single radio, need to see how this should be
348 if (mac_phy_caps
->supported_bands
& WMI_HOST_WLAN_2G_CAP
) {
349 pdev_cap
->tx_chain_mask
= mac_phy_caps
->tx_chain_mask_2g
;
350 pdev_cap
->rx_chain_mask
= mac_phy_caps
->rx_chain_mask_2g
;
351 } else if (mac_phy_caps
->supported_bands
& WMI_HOST_WLAN_5G_CAP
) {
352 pdev_cap
->vht_cap
= mac_phy_caps
->vht_cap_info_5g
;
353 pdev_cap
->vht_mcs
= mac_phy_caps
->vht_supp_mcs_5g
;
354 pdev_cap
->he_mcs
= mac_phy_caps
->he_supp_mcs_5g
;
355 pdev_cap
->tx_chain_mask
= mac_phy_caps
->tx_chain_mask_5g
;
356 pdev_cap
->rx_chain_mask
= mac_phy_caps
->rx_chain_mask_5g
;
361 /* tx/rx chainmask reported from fw depends on the actual hw chains used,
362 * For example, for 4x4 capable macphys, first 4 chains can be used for first
363 * mac and the remaing 4 chains can be used for the second mac or vice-versa.
364 * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
365 * will be advertised for second mac or vice-versa. Compute the shift value for
366 * for tx/rx chainmask which will be used to advertise supported ht/vht rates to
369 pdev_cap
->tx_chain_mask_shift
=
370 find_first_bit((unsigned long *)&pdev_cap
->tx_chain_mask
, 32);
371 pdev_cap
->rx_chain_mask_shift
=
372 find_first_bit((unsigned long *)&pdev_cap
->rx_chain_mask
, 32);
374 if (mac_phy_caps
->supported_bands
& WMI_HOST_WLAN_2G_CAP
) {
375 cap_band
= &pdev_cap
->band
[NL80211_BAND_2GHZ
];
376 cap_band
->phy_id
= mac_phy_caps
->phy_id
;
377 cap_band
->max_bw_supported
= mac_phy_caps
->max_bw_supported_2g
;
378 cap_band
->ht_cap_info
= mac_phy_caps
->ht_cap_info_2g
;
379 cap_band
->he_cap_info
[0] = mac_phy_caps
->he_cap_info_2g
;
380 cap_band
->he_cap_info
[1] = mac_phy_caps
->he_cap_info_2g_ext
;
381 cap_band
->he_mcs
= mac_phy_caps
->he_supp_mcs_2g
;
382 memcpy(cap_band
->he_cap_phy_info
, &mac_phy_caps
->he_cap_phy_info_2g
,
383 sizeof(u32
) * PSOC_HOST_MAX_PHY_SIZE
);
384 memcpy(&cap_band
->he_ppet
, &mac_phy_caps
->he_ppet2g
,
385 sizeof(struct ath11k_ppe_threshold
));
388 if (mac_phy_caps
->supported_bands
& WMI_HOST_WLAN_5G_CAP
) {
389 cap_band
= &pdev_cap
->band
[NL80211_BAND_5GHZ
];
390 cap_band
->phy_id
= mac_phy_caps
->phy_id
;
391 cap_band
->max_bw_supported
= mac_phy_caps
->max_bw_supported_5g
;
392 cap_band
->ht_cap_info
= mac_phy_caps
->ht_cap_info_5g
;
393 cap_band
->he_cap_info
[0] = mac_phy_caps
->he_cap_info_5g
;
394 cap_band
->he_cap_info
[1] = mac_phy_caps
->he_cap_info_5g_ext
;
395 cap_band
->he_mcs
= mac_phy_caps
->he_supp_mcs_5g
;
396 memcpy(cap_band
->he_cap_phy_info
, &mac_phy_caps
->he_cap_phy_info_5g
,
397 sizeof(u32
) * PSOC_HOST_MAX_PHY_SIZE
);
398 memcpy(&cap_band
->he_ppet
, &mac_phy_caps
->he_ppet5g
,
399 sizeof(struct ath11k_ppe_threshold
));
402 cap_band
= &pdev_cap
->band
[NL80211_BAND_6GHZ
];
403 cap_band
->max_bw_supported
= mac_phy_caps
->max_bw_supported_5g
;
404 cap_band
->ht_cap_info
= mac_phy_caps
->ht_cap_info_5g
;
405 cap_band
->he_cap_info
[0] = mac_phy_caps
->he_cap_info_5g
;
406 cap_band
->he_cap_info
[1] = mac_phy_caps
->he_cap_info_5g_ext
;
407 cap_band
->he_mcs
= mac_phy_caps
->he_supp_mcs_5g
;
408 memcpy(cap_band
->he_cap_phy_info
, &mac_phy_caps
->he_cap_phy_info_5g
,
409 sizeof(u32
) * PSOC_HOST_MAX_PHY_SIZE
);
410 memcpy(&cap_band
->he_ppet
, &mac_phy_caps
->he_ppet5g
,
411 sizeof(struct ath11k_ppe_threshold
));
417 ath11k_pull_reg_cap_svc_rdy_ext(struct ath11k_pdev_wmi
*wmi_handle
,
418 struct wmi_soc_hal_reg_capabilities
*reg_caps
,
419 struct wmi_hal_reg_capabilities_ext
*wmi_ext_reg_cap
,
421 struct ath11k_hal_reg_capabilities_ext
*param
)
423 struct wmi_hal_reg_capabilities_ext
*ext_reg_cap
;
425 if (!reg_caps
|| !wmi_ext_reg_cap
)
428 if (phy_idx
>= reg_caps
->num_phy
)
431 ext_reg_cap
= &wmi_ext_reg_cap
[phy_idx
];
433 param
->phy_id
= ext_reg_cap
->phy_id
;
434 param
->eeprom_reg_domain
= ext_reg_cap
->eeprom_reg_domain
;
435 param
->eeprom_reg_domain_ext
=
436 ext_reg_cap
->eeprom_reg_domain_ext
;
437 param
->regcap1
= ext_reg_cap
->regcap1
;
438 param
->regcap2
= ext_reg_cap
->regcap2
;
439 /* check if param->wireless_mode is needed */
440 param
->low_2ghz_chan
= ext_reg_cap
->low_2ghz_chan
;
441 param
->high_2ghz_chan
= ext_reg_cap
->high_2ghz_chan
;
442 param
->low_5ghz_chan
= ext_reg_cap
->low_5ghz_chan
;
443 param
->high_5ghz_chan
= ext_reg_cap
->high_5ghz_chan
;
448 static int ath11k_pull_service_ready_tlv(struct ath11k_base
*ab
,
450 struct ath11k_targ_cap
*cap
)
452 const struct wmi_service_ready_event
*ev
= evt_buf
;
455 ath11k_err(ab
, "%s: failed by NULL param\n",
460 cap
->phy_capability
= ev
->phy_capability
;
461 cap
->max_frag_entry
= ev
->max_frag_entry
;
462 cap
->num_rf_chains
= ev
->num_rf_chains
;
463 cap
->ht_cap_info
= ev
->ht_cap_info
;
464 cap
->vht_cap_info
= ev
->vht_cap_info
;
465 cap
->vht_supp_mcs
= ev
->vht_supp_mcs
;
466 cap
->hw_min_tx_power
= ev
->hw_min_tx_power
;
467 cap
->hw_max_tx_power
= ev
->hw_max_tx_power
;
468 cap
->sys_cap_info
= ev
->sys_cap_info
;
469 cap
->min_pkt_size_enable
= ev
->min_pkt_size_enable
;
470 cap
->max_bcn_ie_size
= ev
->max_bcn_ie_size
;
471 cap
->max_num_scan_channels
= ev
->max_num_scan_channels
;
472 cap
->max_supported_macs
= ev
->max_supported_macs
;
473 cap
->wmi_fw_sub_feat_caps
= ev
->wmi_fw_sub_feat_caps
;
474 cap
->txrx_chainmask
= ev
->txrx_chainmask
;
475 cap
->default_dbs_hw_mode_index
= ev
->default_dbs_hw_mode_index
;
476 cap
->num_msdu_desc
= ev
->num_msdu_desc
;
481 /* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
482 * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
485 static void ath11k_wmi_service_bitmap_copy(struct ath11k_pdev_wmi
*wmi
,
486 const u32
*wmi_svc_bm
)
490 for (i
= 0, j
= 0; i
< WMI_SERVICE_BM_SIZE
&& j
< WMI_MAX_SERVICE
; i
++) {
492 if (wmi_svc_bm
[i
] & BIT(j
% WMI_SERVICE_BITS_IN_SIZE32
))
493 set_bit(j
, wmi
->wmi_ab
->svc_map
);
494 } while (++j
% WMI_SERVICE_BITS_IN_SIZE32
);
498 static int ath11k_wmi_tlv_svc_rdy_parse(struct ath11k_base
*ab
, u16 tag
, u16 len
,
499 const void *ptr
, void *data
)
501 struct wmi_tlv_svc_ready_parse
*svc_ready
= data
;
502 struct ath11k_pdev_wmi
*wmi_handle
= &ab
->wmi_ab
.wmi
[0];
506 case WMI_TAG_SERVICE_READY_EVENT
:
507 if (ath11k_pull_service_ready_tlv(ab
, ptr
, &ab
->target_caps
))
511 case WMI_TAG_ARRAY_UINT32
:
512 if (!svc_ready
->wmi_svc_bitmap_done
) {
513 expect_len
= WMI_SERVICE_BM_SIZE
* sizeof(u32
);
514 if (len
< expect_len
) {
515 ath11k_warn(ab
, "invalid len %d for the tag 0x%x\n",
520 ath11k_wmi_service_bitmap_copy(wmi_handle
, ptr
);
522 svc_ready
->wmi_svc_bitmap_done
= true;
532 static int ath11k_service_ready_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
534 struct wmi_tlv_svc_ready_parse svc_ready
= { };
537 ret
= ath11k_wmi_tlv_iter(ab
, skb
->data
, skb
->len
,
538 ath11k_wmi_tlv_svc_rdy_parse
,
541 ath11k_warn(ab
, "failed to parse tlv %d\n", ret
);
548 struct sk_buff
*ath11k_wmi_alloc_skb(struct ath11k_wmi_base
*wmi_sc
, u32 len
)
551 struct ath11k_base
*ab
= wmi_sc
->ab
;
552 u32 round_len
= roundup(len
, 4);
554 skb
= ath11k_htc_alloc_skb(ab
, WMI_SKB_HEADROOM
+ round_len
);
558 skb_reserve(skb
, WMI_SKB_HEADROOM
);
559 if (!IS_ALIGNED((unsigned long)skb
->data
, 4))
560 ath11k_warn(ab
, "unaligned WMI skb data\n");
562 skb_put(skb
, round_len
);
563 memset(skb
->data
, 0, round_len
);
568 int ath11k_wmi_mgmt_send(struct ath11k
*ar
, u32 vdev_id
, u32 buf_id
,
569 struct sk_buff
*frame
)
571 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
572 struct wmi_mgmt_send_cmd
*cmd
;
573 struct wmi_tlv
*frame_tlv
;
578 buf_len
= frame
->len
< WMI_MGMT_SEND_DOWNLD_LEN
?
579 frame
->len
: WMI_MGMT_SEND_DOWNLD_LEN
;
581 len
= sizeof(*cmd
) + sizeof(*frame_tlv
) + roundup(buf_len
, 4);
583 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
587 cmd
= (struct wmi_mgmt_send_cmd
*)skb
->data
;
588 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_MGMT_TX_SEND_CMD
) |
589 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
590 cmd
->vdev_id
= vdev_id
;
591 cmd
->desc_id
= buf_id
;
593 cmd
->paddr_lo
= lower_32_bits(ATH11K_SKB_CB(frame
)->paddr
);
594 cmd
->paddr_hi
= upper_32_bits(ATH11K_SKB_CB(frame
)->paddr
);
595 cmd
->frame_len
= frame
->len
;
596 cmd
->buf_len
= buf_len
;
597 cmd
->tx_params_valid
= 0;
599 frame_tlv
= (struct wmi_tlv
*)(skb
->data
+ sizeof(*cmd
));
600 frame_tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_BYTE
) |
601 FIELD_PREP(WMI_TLV_LEN
, buf_len
);
603 memcpy(frame_tlv
->value
, frame
->data
, buf_len
);
605 ath11k_ce_byte_swap(frame_tlv
->value
, buf_len
);
607 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_MGMT_TX_SEND_CMDID
);
610 "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
617 int ath11k_wmi_vdev_create(struct ath11k
*ar
, u8
*macaddr
,
618 struct vdev_create_params
*param
)
620 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
621 struct wmi_vdev_create_cmd
*cmd
;
623 struct wmi_vdev_txrx_streams
*txrx_streams
;
628 /* It can be optimized my sending tx/rx chain configuration
629 * only for supported bands instead of always sending it for
632 len
= sizeof(*cmd
) + TLV_HDR_SIZE
+
633 (WMI_NUM_SUPPORTED_BAND_MAX
* sizeof(*txrx_streams
));
635 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
639 cmd
= (struct wmi_vdev_create_cmd
*)skb
->data
;
640 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_CREATE_CMD
) |
641 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
643 cmd
->vdev_id
= param
->if_id
;
644 cmd
->vdev_type
= param
->type
;
645 cmd
->vdev_subtype
= param
->subtype
;
646 cmd
->num_cfg_txrx_streams
= WMI_NUM_SUPPORTED_BAND_MAX
;
647 cmd
->pdev_id
= param
->pdev_id
;
648 ether_addr_copy(cmd
->vdev_macaddr
.addr
, macaddr
);
650 ptr
= skb
->data
+ sizeof(*cmd
);
651 len
= WMI_NUM_SUPPORTED_BAND_MAX
* sizeof(*txrx_streams
);
654 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_STRUCT
) |
655 FIELD_PREP(WMI_TLV_LEN
, len
);
659 len
= sizeof(*txrx_streams
);
660 txrx_streams
->tlv_header
=
661 FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_TXRX_STREAMS
) |
662 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
663 txrx_streams
->band
= WMI_TPC_CHAINMASK_CONFIG_BAND_2G
;
664 txrx_streams
->supported_tx_streams
=
665 param
->chains
[NL80211_BAND_2GHZ
].tx
;
666 txrx_streams
->supported_rx_streams
=
667 param
->chains
[NL80211_BAND_2GHZ
].rx
;
670 txrx_streams
->tlv_header
=
671 FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_TXRX_STREAMS
) |
672 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
673 txrx_streams
->band
= WMI_TPC_CHAINMASK_CONFIG_BAND_5G
;
674 txrx_streams
->supported_tx_streams
=
675 param
->chains
[NL80211_BAND_5GHZ
].tx
;
676 txrx_streams
->supported_rx_streams
=
677 param
->chains
[NL80211_BAND_5GHZ
].rx
;
679 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_VDEV_CREATE_CMDID
);
682 "failed to submit WMI_VDEV_CREATE_CMDID\n");
686 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
687 "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
688 param
->if_id
, param
->type
, param
->subtype
,
689 macaddr
, param
->pdev_id
);
694 int ath11k_wmi_vdev_delete(struct ath11k
*ar
, u8 vdev_id
)
696 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
697 struct wmi_vdev_delete_cmd
*cmd
;
701 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
705 cmd
= (struct wmi_vdev_delete_cmd
*)skb
->data
;
706 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_DELETE_CMD
) |
707 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
708 cmd
->vdev_id
= vdev_id
;
710 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_VDEV_DELETE_CMDID
);
712 ath11k_warn(ar
->ab
, "failed to submit WMI_VDEV_DELETE_CMDID\n");
716 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
, "WMI vdev delete id %d\n", vdev_id
);
721 int ath11k_wmi_vdev_stop(struct ath11k
*ar
, u8 vdev_id
)
723 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
724 struct wmi_vdev_stop_cmd
*cmd
;
728 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
732 cmd
= (struct wmi_vdev_stop_cmd
*)skb
->data
;
734 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_STOP_CMD
) |
735 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
736 cmd
->vdev_id
= vdev_id
;
738 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_VDEV_STOP_CMDID
);
740 ath11k_warn(ar
->ab
, "failed to submit WMI_VDEV_STOP cmd\n");
744 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
, "WMI vdev stop id 0x%x\n", vdev_id
);
749 int ath11k_wmi_vdev_down(struct ath11k
*ar
, u8 vdev_id
)
751 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
752 struct wmi_vdev_down_cmd
*cmd
;
756 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
760 cmd
= (struct wmi_vdev_down_cmd
*)skb
->data
;
762 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_DOWN_CMD
) |
763 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
764 cmd
->vdev_id
= vdev_id
;
766 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_VDEV_DOWN_CMDID
);
768 ath11k_warn(ar
->ab
, "failed to submit WMI_VDEV_DOWN cmd\n");
772 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
, "WMI vdev down id 0x%x\n", vdev_id
);
777 static void ath11k_wmi_put_wmi_channel(struct wmi_channel
*chan
,
778 struct wmi_vdev_start_req_arg
*arg
)
780 memset(chan
, 0, sizeof(*chan
));
782 chan
->mhz
= arg
->channel
.freq
;
783 chan
->band_center_freq1
= arg
->channel
.band_center_freq1
;
784 if (arg
->channel
.mode
== MODE_11AC_VHT80_80
)
785 chan
->band_center_freq2
= arg
->channel
.band_center_freq2
;
787 chan
->band_center_freq2
= 0;
789 chan
->info
|= FIELD_PREP(WMI_CHAN_INFO_MODE
, arg
->channel
.mode
);
790 if (arg
->channel
.passive
)
791 chan
->info
|= WMI_CHAN_INFO_PASSIVE
;
792 if (arg
->channel
.allow_ibss
)
793 chan
->info
|= WMI_CHAN_INFO_ADHOC_ALLOWED
;
794 if (arg
->channel
.allow_ht
)
795 chan
->info
|= WMI_CHAN_INFO_ALLOW_HT
;
796 if (arg
->channel
.allow_vht
)
797 chan
->info
|= WMI_CHAN_INFO_ALLOW_VHT
;
798 if (arg
->channel
.allow_he
)
799 chan
->info
|= WMI_CHAN_INFO_ALLOW_HE
;
800 if (arg
->channel
.ht40plus
)
801 chan
->info
|= WMI_CHAN_INFO_HT40_PLUS
;
802 if (arg
->channel
.chan_radar
)
803 chan
->info
|= WMI_CHAN_INFO_DFS
;
804 if (arg
->channel
.freq2_radar
)
805 chan
->info
|= WMI_CHAN_INFO_DFS_FREQ2
;
807 chan
->reg_info_1
= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR
,
808 arg
->channel
.max_power
) |
809 FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR
,
810 arg
->channel
.max_reg_power
);
812 chan
->reg_info_2
= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX
,
813 arg
->channel
.max_antenna_gain
) |
814 FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR
,
815 arg
->channel
.max_power
);
818 int ath11k_wmi_vdev_start(struct ath11k
*ar
, struct wmi_vdev_start_req_arg
*arg
,
821 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
822 struct wmi_vdev_start_request_cmd
*cmd
;
824 struct wmi_channel
*chan
;
829 if (WARN_ON(arg
->ssid_len
> sizeof(cmd
->ssid
.ssid
)))
832 len
= sizeof(*cmd
) + sizeof(*chan
) + TLV_HDR_SIZE
;
834 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
838 cmd
= (struct wmi_vdev_start_request_cmd
*)skb
->data
;
839 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
840 WMI_TAG_VDEV_START_REQUEST_CMD
) |
841 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
842 cmd
->vdev_id
= arg
->vdev_id
;
843 cmd
->beacon_interval
= arg
->bcn_intval
;
844 cmd
->bcn_tx_rate
= arg
->bcn_tx_rate
;
845 cmd
->dtim_period
= arg
->dtim_period
;
846 cmd
->num_noa_descriptors
= arg
->num_noa_descriptors
;
847 cmd
->preferred_rx_streams
= arg
->pref_rx_streams
;
848 cmd
->preferred_tx_streams
= arg
->pref_tx_streams
;
849 cmd
->cac_duration_ms
= arg
->cac_duration_ms
;
850 cmd
->regdomain
= arg
->regdomain
;
851 cmd
->he_ops
= arg
->he_ops
;
855 cmd
->ssid
.ssid_len
= arg
->ssid_len
;
856 memcpy(cmd
->ssid
.ssid
, arg
->ssid
, arg
->ssid_len
);
858 if (arg
->hidden_ssid
)
859 cmd
->flags
|= WMI_VDEV_START_HIDDEN_SSID
;
860 if (arg
->pmf_enabled
)
861 cmd
->flags
|= WMI_VDEV_START_PMF_ENABLED
;
864 cmd
->flags
|= WMI_VDEV_START_LDPC_RX_ENABLED
;
866 ptr
= skb
->data
+ sizeof(*cmd
);
869 ath11k_wmi_put_wmi_channel(chan
, arg
);
871 chan
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_CHANNEL
) |
872 FIELD_PREP(WMI_TLV_LEN
,
873 sizeof(*chan
) - TLV_HDR_SIZE
);
874 ptr
+= sizeof(*chan
);
877 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_STRUCT
) |
878 FIELD_PREP(WMI_TLV_LEN
, 0);
880 /* Note: This is a nested TLV containing:
881 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
887 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
888 WMI_VDEV_RESTART_REQUEST_CMDID
);
890 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
891 WMI_VDEV_START_REQUEST_CMDID
);
893 ath11k_warn(ar
->ab
, "failed to submit vdev_%s cmd\n",
894 restart
? "restart" : "start");
898 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
899 restart
? "restart" : "start", arg
->vdev_id
,
900 arg
->channel
.freq
, arg
->channel
.mode
);
905 int ath11k_wmi_vdev_up(struct ath11k
*ar
, u32 vdev_id
, u32 aid
, const u8
*bssid
)
907 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
908 struct wmi_vdev_up_cmd
*cmd
;
912 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
916 cmd
= (struct wmi_vdev_up_cmd
*)skb
->data
;
918 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_UP_CMD
) |
919 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
920 cmd
->vdev_id
= vdev_id
;
921 cmd
->vdev_assoc_id
= aid
;
923 ether_addr_copy(cmd
->vdev_bssid
.addr
, bssid
);
925 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_VDEV_UP_CMDID
);
927 ath11k_warn(ar
->ab
, "failed to submit WMI_VDEV_UP cmd\n");
931 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
932 "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
933 vdev_id
, aid
, bssid
);
938 int ath11k_wmi_send_peer_create_cmd(struct ath11k
*ar
,
939 struct peer_create_params
*param
)
941 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
942 struct wmi_peer_create_cmd
*cmd
;
946 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
950 cmd
= (struct wmi_peer_create_cmd
*)skb
->data
;
951 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PEER_CREATE_CMD
) |
952 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
954 ether_addr_copy(cmd
->peer_macaddr
.addr
, param
->peer_addr
);
955 cmd
->peer_type
= param
->peer_type
;
956 cmd
->vdev_id
= param
->vdev_id
;
958 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PEER_CREATE_CMDID
);
960 ath11k_warn(ar
->ab
, "failed to submit WMI_PEER_CREATE cmd\n");
964 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
965 "WMI peer create vdev_id %d peer_addr %pM\n",
966 param
->vdev_id
, param
->peer_addr
);
971 int ath11k_wmi_send_peer_delete_cmd(struct ath11k
*ar
,
972 const u8
*peer_addr
, u8 vdev_id
)
974 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
975 struct wmi_peer_delete_cmd
*cmd
;
979 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
983 cmd
= (struct wmi_peer_delete_cmd
*)skb
->data
;
984 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PEER_DELETE_CMD
) |
985 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
987 ether_addr_copy(cmd
->peer_macaddr
.addr
, peer_addr
);
988 cmd
->vdev_id
= vdev_id
;
990 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
991 "WMI peer delete vdev_id %d peer_addr %pM\n",
994 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PEER_DELETE_CMDID
);
996 ath11k_warn(ar
->ab
, "failed to send WMI_PEER_DELETE cmd\n");
1003 int ath11k_wmi_send_pdev_set_regdomain(struct ath11k
*ar
,
1004 struct pdev_set_regdomain_params
*param
)
1006 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1007 struct wmi_pdev_set_regdomain_cmd
*cmd
;
1008 struct sk_buff
*skb
;
1011 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1015 cmd
= (struct wmi_pdev_set_regdomain_cmd
*)skb
->data
;
1016 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1017 WMI_TAG_PDEV_SET_REGDOMAIN_CMD
) |
1018 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1020 cmd
->reg_domain
= param
->current_rd_in_use
;
1021 cmd
->reg_domain_2g
= param
->current_rd_2g
;
1022 cmd
->reg_domain_5g
= param
->current_rd_5g
;
1023 cmd
->conformance_test_limit_2g
= param
->ctl_2g
;
1024 cmd
->conformance_test_limit_5g
= param
->ctl_5g
;
1025 cmd
->dfs_domain
= param
->dfs_domain
;
1026 cmd
->pdev_id
= param
->pdev_id
;
1028 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1029 "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
1030 param
->current_rd_in_use
, param
->current_rd_2g
,
1031 param
->current_rd_5g
, param
->dfs_domain
, param
->pdev_id
);
1033 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PDEV_SET_REGDOMAIN_CMDID
);
1036 "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
1043 int ath11k_wmi_set_peer_param(struct ath11k
*ar
, const u8
*peer_addr
,
1044 u32 vdev_id
, u32 param_id
, u32 param_val
)
1046 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1047 struct wmi_peer_set_param_cmd
*cmd
;
1048 struct sk_buff
*skb
;
1051 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1055 cmd
= (struct wmi_peer_set_param_cmd
*)skb
->data
;
1056 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PEER_SET_PARAM_CMD
) |
1057 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1058 ether_addr_copy(cmd
->peer_macaddr
.addr
, peer_addr
);
1059 cmd
->vdev_id
= vdev_id
;
1060 cmd
->param_id
= param_id
;
1061 cmd
->param_value
= param_val
;
1063 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PEER_SET_PARAM_CMDID
);
1065 ath11k_warn(ar
->ab
, "failed to send WMI_PEER_SET_PARAM cmd\n");
1069 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1070 "WMI vdev %d peer 0x%pM set param %d value %d\n",
1071 vdev_id
, peer_addr
, param_id
, param_val
);
1076 int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k
*ar
,
1077 u8 peer_addr
[ETH_ALEN
],
1078 struct peer_flush_params
*param
)
1080 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1081 struct wmi_peer_flush_tids_cmd
*cmd
;
1082 struct sk_buff
*skb
;
1085 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1089 cmd
= (struct wmi_peer_flush_tids_cmd
*)skb
->data
;
1090 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PEER_FLUSH_TIDS_CMD
) |
1091 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1093 ether_addr_copy(cmd
->peer_macaddr
.addr
, peer_addr
);
1094 cmd
->peer_tid_bitmap
= param
->peer_tid_bitmap
;
1095 cmd
->vdev_id
= param
->vdev_id
;
1097 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PEER_FLUSH_TIDS_CMDID
);
1100 "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
1104 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1105 "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
1106 param
->vdev_id
, peer_addr
, param
->peer_tid_bitmap
);
1111 int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k
*ar
,
1112 int vdev_id
, const u8
*addr
,
1113 dma_addr_t paddr
, u8 tid
,
1114 u8 ba_window_size_valid
,
1117 struct wmi_peer_reorder_queue_setup_cmd
*cmd
;
1118 struct sk_buff
*skb
;
1121 skb
= ath11k_wmi_alloc_skb(ar
->wmi
->wmi_ab
, sizeof(*cmd
));
1125 cmd
= (struct wmi_peer_reorder_queue_setup_cmd
*)skb
->data
;
1126 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1127 WMI_TAG_REORDER_QUEUE_SETUP_CMD
) |
1128 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1130 ether_addr_copy(cmd
->peer_macaddr
.addr
, addr
);
1131 cmd
->vdev_id
= vdev_id
;
1133 cmd
->queue_ptr_lo
= lower_32_bits(paddr
);
1134 cmd
->queue_ptr_hi
= upper_32_bits(paddr
);
1135 cmd
->queue_no
= tid
;
1136 cmd
->ba_window_size_valid
= ba_window_size_valid
;
1137 cmd
->ba_window_size
= ba_window_size
;
1139 ret
= ath11k_wmi_cmd_send(ar
->wmi
, skb
,
1140 WMI_PEER_REORDER_QUEUE_SETUP_CMDID
);
1143 "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
1147 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1148 "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
1149 addr
, vdev_id
, tid
);
1155 ath11k_wmi_rx_reord_queue_remove(struct ath11k
*ar
,
1156 struct rx_reorder_queue_remove_params
*param
)
1158 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1159 struct wmi_peer_reorder_queue_remove_cmd
*cmd
;
1160 struct sk_buff
*skb
;
1163 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1167 cmd
= (struct wmi_peer_reorder_queue_remove_cmd
*)skb
->data
;
1168 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1169 WMI_TAG_REORDER_QUEUE_REMOVE_CMD
) |
1170 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1172 ether_addr_copy(cmd
->peer_macaddr
.addr
, param
->peer_macaddr
);
1173 cmd
->vdev_id
= param
->vdev_id
;
1174 cmd
->tid_mask
= param
->peer_tid_bitmap
;
1176 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1177 "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__
,
1178 param
->peer_macaddr
, param
->vdev_id
, param
->peer_tid_bitmap
);
1180 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
1181 WMI_PEER_REORDER_QUEUE_REMOVE_CMDID
);
1184 "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
1191 int ath11k_wmi_pdev_set_param(struct ath11k
*ar
, u32 param_id
,
1192 u32 param_value
, u8 pdev_id
)
1194 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1195 struct wmi_pdev_set_param_cmd
*cmd
;
1196 struct sk_buff
*skb
;
1199 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1203 cmd
= (struct wmi_pdev_set_param_cmd
*)skb
->data
;
1204 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PDEV_SET_PARAM_CMD
) |
1205 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1206 cmd
->pdev_id
= pdev_id
;
1207 cmd
->param_id
= param_id
;
1208 cmd
->param_value
= param_value
;
1210 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PDEV_SET_PARAM_CMDID
);
1212 ath11k_warn(ar
->ab
, "failed to send WMI_PDEV_SET_PARAM cmd\n");
1216 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1217 "WMI pdev set param %d pdev id %d value %d\n",
1218 param_id
, pdev_id
, param_value
);
1223 int ath11k_wmi_pdev_set_ps_mode(struct ath11k
*ar
, int vdev_id
, u32 enable
)
1225 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1226 struct wmi_pdev_set_ps_mode_cmd
*cmd
;
1227 struct sk_buff
*skb
;
1230 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1234 cmd
= (struct wmi_pdev_set_ps_mode_cmd
*)skb
->data
;
1235 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_STA_POWERSAVE_MODE_CMD
) |
1236 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1237 cmd
->vdev_id
= vdev_id
;
1238 cmd
->sta_ps_mode
= enable
;
1240 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_STA_POWERSAVE_MODE_CMDID
);
1242 ath11k_warn(ar
->ab
, "failed to send WMI_PDEV_SET_PARAM cmd\n");
1246 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1247 "WMI vdev set psmode %d vdev id %d\n",
1253 int ath11k_wmi_pdev_suspend(struct ath11k
*ar
, u32 suspend_opt
,
1256 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1257 struct wmi_pdev_suspend_cmd
*cmd
;
1258 struct sk_buff
*skb
;
1261 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1265 cmd
= (struct wmi_pdev_suspend_cmd
*)skb
->data
;
1267 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PDEV_SUSPEND_CMD
) |
1268 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1270 cmd
->suspend_opt
= suspend_opt
;
1271 cmd
->pdev_id
= pdev_id
;
1273 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PDEV_SUSPEND_CMDID
);
1275 ath11k_warn(ar
->ab
, "failed to send WMI_PDEV_SUSPEND cmd\n");
1279 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1280 "WMI pdev suspend pdev_id %d\n", pdev_id
);
1285 int ath11k_wmi_pdev_resume(struct ath11k
*ar
, u32 pdev_id
)
1287 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1288 struct wmi_pdev_resume_cmd
*cmd
;
1289 struct sk_buff
*skb
;
1292 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1296 cmd
= (struct wmi_pdev_resume_cmd
*)skb
->data
;
1298 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PDEV_RESUME_CMD
) |
1299 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1300 cmd
->pdev_id
= pdev_id
;
1302 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1303 "WMI pdev resume pdev id %d\n", pdev_id
);
1305 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PDEV_RESUME_CMDID
);
1307 ath11k_warn(ar
->ab
, "failed to send WMI_PDEV_RESUME cmd\n");
1314 /* TODO FW Support for the cmd is not available yet.
1315 * Can be tested once the command and corresponding
1316 * event is implemented in FW
1318 int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k
*ar
,
1319 enum wmi_bss_chan_info_req_type type
)
1321 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1322 struct wmi_pdev_bss_chan_info_req_cmd
*cmd
;
1323 struct sk_buff
*skb
;
1326 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1330 cmd
= (struct wmi_pdev_bss_chan_info_req_cmd
*)skb
->data
;
1332 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1333 WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST
) |
1334 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1335 cmd
->req_type
= type
;
1337 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1338 "WMI bss chan info req type %d\n", type
);
1340 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
1341 WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID
);
1344 "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
1351 int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k
*ar
, u8
*peer_addr
,
1352 struct ap_ps_params
*param
)
1354 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1355 struct wmi_ap_ps_peer_cmd
*cmd
;
1356 struct sk_buff
*skb
;
1359 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1363 cmd
= (struct wmi_ap_ps_peer_cmd
*)skb
->data
;
1364 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_AP_PS_PEER_CMD
) |
1365 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1367 cmd
->vdev_id
= param
->vdev_id
;
1368 ether_addr_copy(cmd
->peer_macaddr
.addr
, peer_addr
);
1369 cmd
->param
= param
->param
;
1370 cmd
->value
= param
->value
;
1372 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_AP_PS_PEER_PARAM_CMDID
);
1375 "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
1379 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1380 "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
1381 param
->vdev_id
, peer_addr
, param
->param
, param
->value
);
1386 int ath11k_wmi_set_sta_ps_param(struct ath11k
*ar
, u32 vdev_id
,
1387 u32 param
, u32 param_value
)
1389 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1390 struct wmi_sta_powersave_param_cmd
*cmd
;
1391 struct sk_buff
*skb
;
1394 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1398 cmd
= (struct wmi_sta_powersave_param_cmd
*)skb
->data
;
1399 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1400 WMI_TAG_STA_POWERSAVE_PARAM_CMD
) |
1401 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1403 cmd
->vdev_id
= vdev_id
;
1405 cmd
->value
= param_value
;
1407 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1408 "WMI set sta ps vdev_id %d param %d value %d\n",
1409 vdev_id
, param
, param_value
);
1411 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_STA_POWERSAVE_PARAM_CMDID
);
1413 ath11k_warn(ar
->ab
, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID");
1420 int ath11k_wmi_force_fw_hang_cmd(struct ath11k
*ar
, u32 type
, u32 delay_time_ms
)
1422 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1423 struct wmi_force_fw_hang_cmd
*cmd
;
1424 struct sk_buff
*skb
;
1429 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
1433 cmd
= (struct wmi_force_fw_hang_cmd
*)skb
->data
;
1434 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_FORCE_FW_HANG_CMD
) |
1435 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
1438 cmd
->delay_time_ms
= delay_time_ms
;
1440 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_FORCE_FW_HANG_CMDID
);
1443 ath11k_warn(ar
->ab
, "Failed to send WMI_FORCE_FW_HANG_CMDID");
1449 int ath11k_wmi_vdev_set_param_cmd(struct ath11k
*ar
, u32 vdev_id
,
1450 u32 param_id
, u32 param_value
)
1452 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1453 struct wmi_vdev_set_param_cmd
*cmd
;
1454 struct sk_buff
*skb
;
1457 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1461 cmd
= (struct wmi_vdev_set_param_cmd
*)skb
->data
;
1462 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_SET_PARAM_CMD
) |
1463 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1465 cmd
->vdev_id
= vdev_id
;
1466 cmd
->param_id
= param_id
;
1467 cmd
->param_value
= param_value
;
1469 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_VDEV_SET_PARAM_CMDID
);
1472 "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
1476 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1477 "WMI vdev id 0x%x set param %d value %d\n",
1478 vdev_id
, param_id
, param_value
);
1483 int ath11k_wmi_send_stats_request_cmd(struct ath11k
*ar
,
1484 struct stats_request_params
*param
)
1486 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1487 struct wmi_request_stats_cmd
*cmd
;
1488 struct sk_buff
*skb
;
1491 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1495 cmd
= (struct wmi_request_stats_cmd
*)skb
->data
;
1496 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_REQUEST_STATS_CMD
) |
1497 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1499 cmd
->stats_id
= param
->stats_id
;
1500 cmd
->vdev_id
= param
->vdev_id
;
1501 cmd
->pdev_id
= param
->pdev_id
;
1503 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_REQUEST_STATS_CMDID
);
1505 ath11k_warn(ar
->ab
, "failed to send WMI_REQUEST_STATS cmd\n");
1509 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1510 "WMI request stats 0x%x vdev id %d pdev id %d\n",
1511 param
->stats_id
, param
->vdev_id
, param
->pdev_id
);
1516 int ath11k_wmi_send_pdev_temperature_cmd(struct ath11k
*ar
)
1518 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1519 struct wmi_get_pdev_temperature_cmd
*cmd
;
1520 struct sk_buff
*skb
;
1523 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1527 cmd
= (struct wmi_get_pdev_temperature_cmd
*)skb
->data
;
1528 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PDEV_GET_TEMPERATURE_CMD
) |
1529 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1530 cmd
->pdev_id
= ar
->pdev
->pdev_id
;
1532 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PDEV_GET_TEMPERATURE_CMDID
);
1534 ath11k_warn(ar
->ab
, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
1538 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1539 "WMI pdev get temperature for pdev_id %d\n", ar
->pdev
->pdev_id
);
1544 int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k
*ar
,
1545 u32 vdev_id
, u32 bcn_ctrl_op
)
1547 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1548 struct wmi_bcn_offload_ctrl_cmd
*cmd
;
1549 struct sk_buff
*skb
;
1552 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1556 cmd
= (struct wmi_bcn_offload_ctrl_cmd
*)skb
->data
;
1557 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1558 WMI_TAG_BCN_OFFLOAD_CTRL_CMD
) |
1559 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1561 cmd
->vdev_id
= vdev_id
;
1562 cmd
->bcn_ctrl_op
= bcn_ctrl_op
;
1564 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1565 "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
1566 vdev_id
, bcn_ctrl_op
);
1568 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_BCN_OFFLOAD_CTRL_CMDID
);
1571 "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
1578 int ath11k_wmi_bcn_tmpl(struct ath11k
*ar
, u32 vdev_id
,
1579 struct ieee80211_mutable_offsets
*offs
,
1580 struct sk_buff
*bcn
)
1582 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1583 struct wmi_bcn_tmpl_cmd
*cmd
;
1584 struct wmi_bcn_prb_info
*bcn_prb_info
;
1585 struct wmi_tlv
*tlv
;
1586 struct sk_buff
*skb
;
1589 size_t aligned_len
= roundup(bcn
->len
, 4);
1591 len
= sizeof(*cmd
) + sizeof(*bcn_prb_info
) + TLV_HDR_SIZE
+ aligned_len
;
1593 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
1597 cmd
= (struct wmi_bcn_tmpl_cmd
*)skb
->data
;
1598 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_BCN_TMPL_CMD
) |
1599 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1600 cmd
->vdev_id
= vdev_id
;
1601 cmd
->tim_ie_offset
= offs
->tim_offset
;
1602 cmd
->csa_switch_count_offset
= offs
->cntdwn_counter_offs
[0];
1603 cmd
->ext_csa_switch_count_offset
= offs
->cntdwn_counter_offs
[1];
1604 cmd
->buf_len
= bcn
->len
;
1606 ptr
= skb
->data
+ sizeof(*cmd
);
1609 len
= sizeof(*bcn_prb_info
);
1610 bcn_prb_info
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1611 WMI_TAG_BCN_PRB_INFO
) |
1612 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
1613 bcn_prb_info
->caps
= 0;
1614 bcn_prb_info
->erp
= 0;
1616 ptr
+= sizeof(*bcn_prb_info
);
1619 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_BYTE
) |
1620 FIELD_PREP(WMI_TLV_LEN
, aligned_len
);
1621 memcpy(tlv
->value
, bcn
->data
, bcn
->len
);
1623 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_BCN_TMPL_CMDID
);
1625 ath11k_warn(ar
->ab
, "failed to send WMI_BCN_TMPL_CMDID\n");
1632 int ath11k_wmi_vdev_install_key(struct ath11k
*ar
,
1633 struct wmi_vdev_install_key_arg
*arg
)
1635 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1636 struct wmi_vdev_install_key_cmd
*cmd
;
1637 struct wmi_tlv
*tlv
;
1638 struct sk_buff
*skb
;
1640 int key_len_aligned
= roundup(arg
->key_len
, sizeof(uint32_t));
1642 len
= sizeof(*cmd
) + TLV_HDR_SIZE
+ key_len_aligned
;
1644 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
1648 cmd
= (struct wmi_vdev_install_key_cmd
*)skb
->data
;
1649 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_INSTALL_KEY_CMD
) |
1650 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1651 cmd
->vdev_id
= arg
->vdev_id
;
1652 ether_addr_copy(cmd
->peer_macaddr
.addr
, arg
->macaddr
);
1653 cmd
->key_idx
= arg
->key_idx
;
1654 cmd
->key_flags
= arg
->key_flags
;
1655 cmd
->key_cipher
= arg
->key_cipher
;
1656 cmd
->key_len
= arg
->key_len
;
1657 cmd
->key_txmic_len
= arg
->key_txmic_len
;
1658 cmd
->key_rxmic_len
= arg
->key_rxmic_len
;
1660 if (arg
->key_rsc_counter
)
1661 memcpy(&cmd
->key_rsc_counter
, &arg
->key_rsc_counter
,
1662 sizeof(struct wmi_key_seq_counter
));
1664 tlv
= (struct wmi_tlv
*)(skb
->data
+ sizeof(*cmd
));
1665 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_BYTE
) |
1666 FIELD_PREP(WMI_TLV_LEN
, key_len_aligned
);
1667 memcpy(tlv
->value
, (u8
*)arg
->key_data
, key_len_aligned
);
1669 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_VDEV_INSTALL_KEY_CMDID
);
1672 "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
1676 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1677 "WMI vdev install key idx %d cipher %d len %d\n",
1678 arg
->key_idx
, arg
->key_cipher
, arg
->key_len
);
1684 ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd
*cmd
,
1685 struct peer_assoc_params
*param
)
1687 cmd
->peer_flags
= 0;
1689 if (param
->is_wme_set
) {
1690 if (param
->qos_flag
)
1691 cmd
->peer_flags
|= WMI_PEER_QOS
;
1692 if (param
->apsd_flag
)
1693 cmd
->peer_flags
|= WMI_PEER_APSD
;
1695 cmd
->peer_flags
|= WMI_PEER_HT
;
1697 cmd
->peer_flags
|= WMI_PEER_40MHZ
;
1699 cmd
->peer_flags
|= WMI_PEER_80MHZ
;
1701 cmd
->peer_flags
|= WMI_PEER_160MHZ
;
1703 /* Typically if STBC is enabled for VHT it should be enabled
1706 if (param
->stbc_flag
)
1707 cmd
->peer_flags
|= WMI_PEER_STBC
;
1709 /* Typically if LDPC is enabled for VHT it should be enabled
1712 if (param
->ldpc_flag
)
1713 cmd
->peer_flags
|= WMI_PEER_LDPC
;
1715 if (param
->static_mimops_flag
)
1716 cmd
->peer_flags
|= WMI_PEER_STATIC_MIMOPS
;
1717 if (param
->dynamic_mimops_flag
)
1718 cmd
->peer_flags
|= WMI_PEER_DYN_MIMOPS
;
1719 if (param
->spatial_mux_flag
)
1720 cmd
->peer_flags
|= WMI_PEER_SPATIAL_MUX
;
1721 if (param
->vht_flag
)
1722 cmd
->peer_flags
|= WMI_PEER_VHT
;
1724 cmd
->peer_flags
|= WMI_PEER_HE
;
1725 if (param
->twt_requester
)
1726 cmd
->peer_flags
|= WMI_PEER_TWT_REQ
;
1727 if (param
->twt_responder
)
1728 cmd
->peer_flags
|= WMI_PEER_TWT_RESP
;
1731 /* Suppress authorization for all AUTH modes that need 4-way handshake
1732 * (during re-association).
1733 * Authorization will be done for these modes on key installation.
1735 if (param
->auth_flag
)
1736 cmd
->peer_flags
|= WMI_PEER_AUTH
;
1737 if (param
->need_ptk_4_way
) {
1738 cmd
->peer_flags
|= WMI_PEER_NEED_PTK_4_WAY
;
1739 cmd
->peer_flags
&= ~WMI_PEER_AUTH
;
1741 if (param
->need_gtk_2_way
)
1742 cmd
->peer_flags
|= WMI_PEER_NEED_GTK_2_WAY
;
1743 /* safe mode bypass the 4-way handshake */
1744 if (param
->safe_mode_enabled
)
1745 cmd
->peer_flags
&= ~(WMI_PEER_NEED_PTK_4_WAY
|
1746 WMI_PEER_NEED_GTK_2_WAY
);
1748 if (param
->is_pmf_enabled
)
1749 cmd
->peer_flags
|= WMI_PEER_PMF
;
1751 /* Disable AMSDU for station transmit, if user configures it */
1752 /* Disable AMSDU for AP transmit to 11n Stations, if user configures
1754 * if (param->amsdu_disable) Add after FW support
1757 /* Target asserts if node is marked HT and all MCS is set to 0.
1758 * Mark the node as non-HT if all the mcs rates are disabled through
1761 if (param
->peer_ht_rates
.num_rates
== 0)
1762 cmd
->peer_flags
&= ~WMI_PEER_HT
;
1765 int ath11k_wmi_send_peer_assoc_cmd(struct ath11k
*ar
,
1766 struct peer_assoc_params
*param
)
1768 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1769 struct wmi_peer_assoc_complete_cmd
*cmd
;
1770 struct wmi_vht_rate_set
*mcs
;
1771 struct wmi_he_rate_set
*he_mcs
;
1772 struct sk_buff
*skb
;
1773 struct wmi_tlv
*tlv
;
1775 u32 peer_legacy_rates_align
;
1776 u32 peer_ht_rates_align
;
1779 peer_legacy_rates_align
= roundup(param
->peer_legacy_rates
.num_rates
,
1781 peer_ht_rates_align
= roundup(param
->peer_ht_rates
.num_rates
,
1784 len
= sizeof(*cmd
) +
1785 TLV_HDR_SIZE
+ (peer_legacy_rates_align
* sizeof(u8
)) +
1786 TLV_HDR_SIZE
+ (peer_ht_rates_align
* sizeof(u8
)) +
1787 sizeof(*mcs
) + TLV_HDR_SIZE
+
1788 (sizeof(*he_mcs
) * param
->peer_he_mcs_count
);
1790 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
1797 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1798 WMI_TAG_PEER_ASSOC_COMPLETE_CMD
) |
1799 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1801 cmd
->vdev_id
= param
->vdev_id
;
1803 cmd
->peer_new_assoc
= param
->peer_new_assoc
;
1804 cmd
->peer_associd
= param
->peer_associd
;
1806 ath11k_wmi_copy_peer_flags(cmd
, param
);
1808 ether_addr_copy(cmd
->peer_macaddr
.addr
, param
->peer_mac
);
1810 cmd
->peer_rate_caps
= param
->peer_rate_caps
;
1811 cmd
->peer_caps
= param
->peer_caps
;
1812 cmd
->peer_listen_intval
= param
->peer_listen_intval
;
1813 cmd
->peer_ht_caps
= param
->peer_ht_caps
;
1814 cmd
->peer_max_mpdu
= param
->peer_max_mpdu
;
1815 cmd
->peer_mpdu_density
= param
->peer_mpdu_density
;
1816 cmd
->peer_vht_caps
= param
->peer_vht_caps
;
1817 cmd
->peer_phymode
= param
->peer_phymode
;
1819 /* Update 11ax capabilities */
1820 cmd
->peer_he_cap_info
= param
->peer_he_cap_macinfo
[0];
1821 cmd
->peer_he_cap_info_ext
= param
->peer_he_cap_macinfo
[1];
1822 cmd
->peer_he_cap_info_internal
= param
->peer_he_cap_macinfo_internal
;
1823 cmd
->peer_he_caps_6ghz
= param
->peer_he_caps_6ghz
;
1824 cmd
->peer_he_ops
= param
->peer_he_ops
;
1825 memcpy(&cmd
->peer_he_cap_phy
, ¶m
->peer_he_cap_phyinfo
,
1826 sizeof(param
->peer_he_cap_phyinfo
));
1827 memcpy(&cmd
->peer_ppet
, ¶m
->peer_ppet
,
1828 sizeof(param
->peer_ppet
));
1830 /* Update peer legacy rate information */
1831 ptr
+= sizeof(*cmd
);
1834 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_BYTE
) |
1835 FIELD_PREP(WMI_TLV_LEN
, peer_legacy_rates_align
);
1837 ptr
+= TLV_HDR_SIZE
;
1839 cmd
->num_peer_legacy_rates
= param
->peer_legacy_rates
.num_rates
;
1840 memcpy(ptr
, param
->peer_legacy_rates
.rates
,
1841 param
->peer_legacy_rates
.num_rates
);
1843 /* Update peer HT rate information */
1844 ptr
+= peer_legacy_rates_align
;
1847 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_BYTE
) |
1848 FIELD_PREP(WMI_TLV_LEN
, peer_ht_rates_align
);
1849 ptr
+= TLV_HDR_SIZE
;
1850 cmd
->num_peer_ht_rates
= param
->peer_ht_rates
.num_rates
;
1851 memcpy(ptr
, param
->peer_ht_rates
.rates
,
1852 param
->peer_ht_rates
.num_rates
);
1855 ptr
+= peer_ht_rates_align
;
1859 mcs
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VHT_RATE_SET
) |
1860 FIELD_PREP(WMI_TLV_LEN
, sizeof(*mcs
) - TLV_HDR_SIZE
);
1862 cmd
->peer_nss
= param
->peer_nss
;
1864 /* Update bandwidth-NSS mapping */
1865 cmd
->peer_bw_rxnss_override
= 0;
1866 cmd
->peer_bw_rxnss_override
|= param
->peer_bw_rxnss_override
;
1868 if (param
->vht_capable
) {
1869 mcs
->rx_max_rate
= param
->rx_max_rate
;
1870 mcs
->rx_mcs_set
= param
->rx_mcs_set
;
1871 mcs
->tx_max_rate
= param
->tx_max_rate
;
1872 mcs
->tx_mcs_set
= param
->tx_mcs_set
;
1876 cmd
->peer_he_mcs
= param
->peer_he_mcs_count
;
1877 cmd
->min_data_rate
= param
->min_data_rate
;
1879 ptr
+= sizeof(*mcs
);
1881 len
= param
->peer_he_mcs_count
* sizeof(*he_mcs
);
1884 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_STRUCT
) |
1885 FIELD_PREP(WMI_TLV_LEN
, len
);
1886 ptr
+= TLV_HDR_SIZE
;
1888 /* Loop through the HE rate set */
1889 for (i
= 0; i
< param
->peer_he_mcs_count
; i
++) {
1891 he_mcs
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1892 WMI_TAG_HE_RATE_SET
) |
1893 FIELD_PREP(WMI_TLV_LEN
,
1894 sizeof(*he_mcs
) - TLV_HDR_SIZE
);
1896 he_mcs
->rx_mcs_set
= param
->peer_he_rx_mcs_set
[i
];
1897 he_mcs
->tx_mcs_set
= param
->peer_he_tx_mcs_set
[i
];
1898 ptr
+= sizeof(*he_mcs
);
1901 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PEER_ASSOC_CMDID
);
1904 "failed to send WMI_PEER_ASSOC_CMDID\n");
1908 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1909 "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n",
1910 cmd
->vdev_id
, cmd
->peer_associd
, param
->peer_mac
,
1911 cmd
->peer_flags
, cmd
->peer_rate_caps
, cmd
->peer_caps
,
1912 cmd
->peer_listen_intval
, cmd
->peer_ht_caps
,
1913 cmd
->peer_max_mpdu
, cmd
->peer_nss
, cmd
->peer_phymode
,
1914 cmd
->peer_mpdu_density
,
1915 cmd
->peer_vht_caps
, cmd
->peer_he_cap_info
,
1916 cmd
->peer_he_ops
, cmd
->peer_he_cap_info_ext
,
1917 cmd
->peer_he_cap_phy
[0], cmd
->peer_he_cap_phy
[1],
1918 cmd
->peer_he_cap_phy
[2],
1919 cmd
->peer_bw_rxnss_override
);
1924 void ath11k_wmi_start_scan_init(struct ath11k
*ar
,
1925 struct scan_req_params
*arg
)
1927 /* setup commonly used values */
1928 arg
->scan_req_id
= 1;
1929 arg
->scan_priority
= WMI_SCAN_PRIORITY_LOW
;
1930 arg
->dwell_time_active
= 50;
1931 arg
->dwell_time_active_2g
= 0;
1932 arg
->dwell_time_passive
= 150;
1933 arg
->dwell_time_active_6g
= 40;
1934 arg
->dwell_time_passive_6g
= 30;
1935 arg
->min_rest_time
= 50;
1936 arg
->max_rest_time
= 500;
1937 arg
->repeat_probe_time
= 0;
1938 arg
->probe_spacing_time
= 0;
1940 arg
->max_scan_time
= 20000;
1941 arg
->probe_delay
= 5;
1942 arg
->notify_scan_events
= WMI_SCAN_EVENT_STARTED
|
1943 WMI_SCAN_EVENT_COMPLETED
|
1944 WMI_SCAN_EVENT_BSS_CHANNEL
|
1945 WMI_SCAN_EVENT_FOREIGN_CHAN
|
1946 WMI_SCAN_EVENT_DEQUEUED
;
1947 arg
->scan_flags
|= WMI_SCAN_CHAN_STAT_EVENT
;
1952 ath11k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd
*cmd
,
1953 struct scan_req_params
*param
)
1955 /* Scan events subscription */
1956 if (param
->scan_ev_started
)
1957 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_STARTED
;
1958 if (param
->scan_ev_completed
)
1959 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_COMPLETED
;
1960 if (param
->scan_ev_bss_chan
)
1961 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_BSS_CHANNEL
;
1962 if (param
->scan_ev_foreign_chan
)
1963 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_FOREIGN_CHAN
;
1964 if (param
->scan_ev_dequeued
)
1965 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_DEQUEUED
;
1966 if (param
->scan_ev_preempted
)
1967 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_PREEMPTED
;
1968 if (param
->scan_ev_start_failed
)
1969 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_START_FAILED
;
1970 if (param
->scan_ev_restarted
)
1971 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_RESTARTED
;
1972 if (param
->scan_ev_foreign_chn_exit
)
1973 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT
;
1974 if (param
->scan_ev_suspended
)
1975 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_SUSPENDED
;
1976 if (param
->scan_ev_resumed
)
1977 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_RESUMED
;
1979 /** Set scan control flags */
1980 cmd
->scan_ctrl_flags
= 0;
1981 if (param
->scan_f_passive
)
1982 cmd
->scan_ctrl_flags
|= WMI_SCAN_FLAG_PASSIVE
;
1983 if (param
->scan_f_strict_passive_pch
)
1984 cmd
->scan_ctrl_flags
|= WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN
;
1985 if (param
->scan_f_promisc_mode
)
1986 cmd
->scan_ctrl_flags
|= WMI_SCAN_FILTER_PROMISCUOS
;
1987 if (param
->scan_f_capture_phy_err
)
1988 cmd
->scan_ctrl_flags
|= WMI_SCAN_CAPTURE_PHY_ERROR
;
1989 if (param
->scan_f_half_rate
)
1990 cmd
->scan_ctrl_flags
|= WMI_SCAN_FLAG_HALF_RATE_SUPPORT
;
1991 if (param
->scan_f_quarter_rate
)
1992 cmd
->scan_ctrl_flags
|= WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT
;
1993 if (param
->scan_f_cck_rates
)
1994 cmd
->scan_ctrl_flags
|= WMI_SCAN_ADD_CCK_RATES
;
1995 if (param
->scan_f_ofdm_rates
)
1996 cmd
->scan_ctrl_flags
|= WMI_SCAN_ADD_OFDM_RATES
;
1997 if (param
->scan_f_chan_stat_evnt
)
1998 cmd
->scan_ctrl_flags
|= WMI_SCAN_CHAN_STAT_EVENT
;
1999 if (param
->scan_f_filter_prb_req
)
2000 cmd
->scan_ctrl_flags
|= WMI_SCAN_FILTER_PROBE_REQ
;
2001 if (param
->scan_f_bcast_probe
)
2002 cmd
->scan_ctrl_flags
|= WMI_SCAN_ADD_BCAST_PROBE_REQ
;
2003 if (param
->scan_f_offchan_mgmt_tx
)
2004 cmd
->scan_ctrl_flags
|= WMI_SCAN_OFFCHAN_MGMT_TX
;
2005 if (param
->scan_f_offchan_data_tx
)
2006 cmd
->scan_ctrl_flags
|= WMI_SCAN_OFFCHAN_DATA_TX
;
2007 if (param
->scan_f_force_active_dfs_chn
)
2008 cmd
->scan_ctrl_flags
|= WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS
;
2009 if (param
->scan_f_add_tpc_ie_in_probe
)
2010 cmd
->scan_ctrl_flags
|= WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ
;
2011 if (param
->scan_f_add_ds_ie_in_probe
)
2012 cmd
->scan_ctrl_flags
|= WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ
;
2013 if (param
->scan_f_add_spoofed_mac_in_probe
)
2014 cmd
->scan_ctrl_flags
|= WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ
;
2015 if (param
->scan_f_add_rand_seq_in_probe
)
2016 cmd
->scan_ctrl_flags
|= WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ
;
2017 if (param
->scan_f_en_ie_whitelist_in_probe
)
2018 cmd
->scan_ctrl_flags
|=
2019 WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ
;
2021 /* for adaptive scan mode using 3 bits (21 - 23 bits) */
2022 WMI_SCAN_SET_DWELL_MODE(cmd
->scan_ctrl_flags
,
2023 param
->adaptive_dwell_time_mode
);
2026 int ath11k_wmi_send_scan_start_cmd(struct ath11k
*ar
,
2027 struct scan_req_params
*params
)
2029 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2030 struct wmi_start_scan_cmd
*cmd
;
2031 struct wmi_ssid
*ssid
= NULL
;
2032 struct wmi_mac_addr
*bssid
;
2033 struct sk_buff
*skb
;
2034 struct wmi_tlv
*tlv
;
2038 u8 extraie_len_with_pad
= 0;
2039 struct hint_short_ssid
*s_ssid
= NULL
;
2040 struct hint_bssid
*hint_bssid
= NULL
;
2044 len
+= TLV_HDR_SIZE
;
2045 if (params
->num_chan
)
2046 len
+= params
->num_chan
* sizeof(u32
);
2048 len
+= TLV_HDR_SIZE
;
2049 if (params
->num_ssids
)
2050 len
+= params
->num_ssids
* sizeof(*ssid
);
2052 len
+= TLV_HDR_SIZE
;
2053 if (params
->num_bssid
)
2054 len
+= sizeof(*bssid
) * params
->num_bssid
;
2056 len
+= TLV_HDR_SIZE
;
2057 if (params
->extraie
.len
)
2058 extraie_len_with_pad
=
2059 roundup(params
->extraie
.len
, sizeof(u32
));
2060 len
+= extraie_len_with_pad
;
2062 if (params
->num_hint_bssid
)
2063 len
+= TLV_HDR_SIZE
+
2064 params
->num_hint_bssid
* sizeof(struct hint_bssid
);
2066 if (params
->num_hint_s_ssid
)
2067 len
+= TLV_HDR_SIZE
+
2068 params
->num_hint_s_ssid
* sizeof(struct hint_short_ssid
);
2070 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
2077 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_START_SCAN_CMD
) |
2078 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2080 cmd
->scan_id
= params
->scan_id
;
2081 cmd
->scan_req_id
= params
->scan_req_id
;
2082 cmd
->vdev_id
= params
->vdev_id
;
2083 cmd
->scan_priority
= params
->scan_priority
;
2084 cmd
->notify_scan_events
= params
->notify_scan_events
;
2086 ath11k_wmi_copy_scan_event_cntrl_flags(cmd
, params
);
2088 cmd
->dwell_time_active
= params
->dwell_time_active
;
2089 cmd
->dwell_time_active_2g
= params
->dwell_time_active_2g
;
2090 cmd
->dwell_time_passive
= params
->dwell_time_passive
;
2091 cmd
->dwell_time_active_6g
= params
->dwell_time_active_6g
;
2092 cmd
->dwell_time_passive_6g
= params
->dwell_time_passive_6g
;
2093 cmd
->min_rest_time
= params
->min_rest_time
;
2094 cmd
->max_rest_time
= params
->max_rest_time
;
2095 cmd
->repeat_probe_time
= params
->repeat_probe_time
;
2096 cmd
->probe_spacing_time
= params
->probe_spacing_time
;
2097 cmd
->idle_time
= params
->idle_time
;
2098 cmd
->max_scan_time
= params
->max_scan_time
;
2099 cmd
->probe_delay
= params
->probe_delay
;
2100 cmd
->burst_duration
= params
->burst_duration
;
2101 cmd
->num_chan
= params
->num_chan
;
2102 cmd
->num_bssid
= params
->num_bssid
;
2103 cmd
->num_ssids
= params
->num_ssids
;
2104 cmd
->ie_len
= params
->extraie
.len
;
2105 cmd
->n_probes
= params
->n_probes
;
2107 ptr
+= sizeof(*cmd
);
2109 len
= params
->num_chan
* sizeof(u32
);
2112 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_UINT32
) |
2113 FIELD_PREP(WMI_TLV_LEN
, len
);
2114 ptr
+= TLV_HDR_SIZE
;
2115 tmp_ptr
= (u32
*)ptr
;
2117 for (i
= 0; i
< params
->num_chan
; ++i
)
2118 tmp_ptr
[i
] = params
->chan_list
[i
];
2122 len
= params
->num_ssids
* sizeof(*ssid
);
2124 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_FIXED_STRUCT
) |
2125 FIELD_PREP(WMI_TLV_LEN
, len
);
2127 ptr
+= TLV_HDR_SIZE
;
2129 if (params
->num_ssids
) {
2131 for (i
= 0; i
< params
->num_ssids
; ++i
) {
2132 ssid
->ssid_len
= params
->ssid
[i
].length
;
2133 memcpy(ssid
->ssid
, params
->ssid
[i
].ssid
,
2134 params
->ssid
[i
].length
);
2139 ptr
+= (params
->num_ssids
* sizeof(*ssid
));
2140 len
= params
->num_bssid
* sizeof(*bssid
);
2142 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_FIXED_STRUCT
) |
2143 FIELD_PREP(WMI_TLV_LEN
, len
);
2145 ptr
+= TLV_HDR_SIZE
;
2148 if (params
->num_bssid
) {
2149 for (i
= 0; i
< params
->num_bssid
; ++i
) {
2150 ether_addr_copy(bssid
->addr
,
2151 params
->bssid_list
[i
].addr
);
2156 ptr
+= params
->num_bssid
* sizeof(*bssid
);
2158 len
= extraie_len_with_pad
;
2160 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_BYTE
) |
2161 FIELD_PREP(WMI_TLV_LEN
, len
);
2162 ptr
+= TLV_HDR_SIZE
;
2164 if (params
->extraie
.len
)
2165 memcpy(ptr
, params
->extraie
.ptr
,
2166 params
->extraie
.len
);
2168 ptr
+= extraie_len_with_pad
;
2170 if (params
->num_hint_s_ssid
) {
2171 len
= params
->num_hint_s_ssid
* sizeof(struct hint_short_ssid
);
2173 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_FIXED_STRUCT
) |
2174 FIELD_PREP(WMI_TLV_LEN
, len
);
2175 ptr
+= TLV_HDR_SIZE
;
2177 for (i
= 0; i
< params
->num_hint_s_ssid
; ++i
) {
2178 s_ssid
->freq_flags
= params
->hint_s_ssid
[i
].freq_flags
;
2179 s_ssid
->short_ssid
= params
->hint_s_ssid
[i
].short_ssid
;
2185 if (params
->num_hint_bssid
) {
2186 len
= params
->num_hint_bssid
* sizeof(struct hint_bssid
);
2188 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_FIXED_STRUCT
) |
2189 FIELD_PREP(WMI_TLV_LEN
, len
);
2190 ptr
+= TLV_HDR_SIZE
;
2192 for (i
= 0; i
< params
->num_hint_bssid
; ++i
) {
2193 hint_bssid
->freq_flags
=
2194 params
->hint_bssid
[i
].freq_flags
;
2195 ether_addr_copy(¶ms
->hint_bssid
[i
].bssid
.addr
[0],
2196 &hint_bssid
->bssid
.addr
[0]);
2201 len
= params
->num_hint_s_ssid
* sizeof(struct hint_short_ssid
);
2203 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_FIXED_STRUCT
) |
2204 FIELD_PREP(WMI_TLV_LEN
, len
);
2205 ptr
+= TLV_HDR_SIZE
;
2206 if (params
->num_hint_s_ssid
) {
2208 for (i
= 0; i
< params
->num_hint_s_ssid
; ++i
) {
2209 s_ssid
->freq_flags
= params
->hint_s_ssid
[i
].freq_flags
;
2210 s_ssid
->short_ssid
= params
->hint_s_ssid
[i
].short_ssid
;
2216 len
= params
->num_hint_bssid
* sizeof(struct hint_bssid
);
2218 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_FIXED_STRUCT
) |
2219 FIELD_PREP(WMI_TLV_LEN
, len
);
2220 ptr
+= TLV_HDR_SIZE
;
2221 if (params
->num_hint_bssid
) {
2223 for (i
= 0; i
< params
->num_hint_bssid
; ++i
) {
2224 hint_bssid
->freq_flags
=
2225 params
->hint_bssid
[i
].freq_flags
;
2226 ether_addr_copy(¶ms
->hint_bssid
[i
].bssid
.addr
[0],
2227 &hint_bssid
->bssid
.addr
[0]);
2232 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2233 WMI_START_SCAN_CMDID
);
2235 ath11k_warn(ar
->ab
, "failed to send WMI_START_SCAN_CMDID\n");
2242 int ath11k_wmi_send_scan_stop_cmd(struct ath11k
*ar
,
2243 struct scan_cancel_param
*param
)
2245 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2246 struct wmi_stop_scan_cmd
*cmd
;
2247 struct sk_buff
*skb
;
2250 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
2254 cmd
= (struct wmi_stop_scan_cmd
*)skb
->data
;
2256 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_STOP_SCAN_CMD
) |
2257 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2259 cmd
->vdev_id
= param
->vdev_id
;
2260 cmd
->requestor
= param
->requester
;
2261 cmd
->scan_id
= param
->scan_id
;
2262 cmd
->pdev_id
= param
->pdev_id
;
2263 /* stop the scan with the corresponding scan_id */
2264 if (param
->req_type
== WLAN_SCAN_CANCEL_PDEV_ALL
) {
2265 /* Cancelling all scans */
2266 cmd
->req_type
= WMI_SCAN_STOP_ALL
;
2267 } else if (param
->req_type
== WLAN_SCAN_CANCEL_VDEV_ALL
) {
2268 /* Cancelling VAP scans */
2269 cmd
->req_type
= WMI_SCN_STOP_VAP_ALL
;
2270 } else if (param
->req_type
== WLAN_SCAN_CANCEL_SINGLE
) {
2271 /* Cancelling specific scan */
2272 cmd
->req_type
= WMI_SCAN_STOP_ONE
;
2274 ath11k_warn(ar
->ab
, "invalid scan cancel param %d",
2280 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2281 WMI_STOP_SCAN_CMDID
);
2283 ath11k_warn(ar
->ab
, "failed to send WMI_STOP_SCAN_CMDID\n");
2290 int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k
*ar
,
2291 struct scan_chan_list_params
*chan_list
)
2293 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2294 struct wmi_scan_chan_list_cmd
*cmd
;
2295 struct sk_buff
*skb
;
2296 struct wmi_channel
*chan_info
;
2297 struct channel_param
*tchan_info
;
2298 struct wmi_tlv
*tlv
;
2301 u16 num_send_chans
, num_sends
= 0, max_chan_limit
= 0;
2304 tchan_info
= &chan_list
->ch_param
[0];
2305 while (chan_list
->nallchans
) {
2306 len
= sizeof(*cmd
) + TLV_HDR_SIZE
;
2307 max_chan_limit
= (wmi
->wmi_ab
->max_msg_len
[ar
->pdev_idx
] - len
) /
2310 if (chan_list
->nallchans
> max_chan_limit
)
2311 num_send_chans
= max_chan_limit
;
2313 num_send_chans
= chan_list
->nallchans
;
2315 chan_list
->nallchans
-= num_send_chans
;
2316 len
+= sizeof(*chan_info
) * num_send_chans
;
2318 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
2322 cmd
= (struct wmi_scan_chan_list_cmd
*)skb
->data
;
2323 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_SCAN_CHAN_LIST_CMD
) |
2324 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2325 cmd
->pdev_id
= chan_list
->pdev_id
;
2326 cmd
->num_scan_chans
= num_send_chans
;
2328 cmd
->flags
|= WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG
;
2330 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
2331 "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
2332 num_send_chans
, len
, cmd
->pdev_id
, num_sends
);
2334 ptr
= skb
->data
+ sizeof(*cmd
);
2336 len
= sizeof(*chan_info
) * num_send_chans
;
2338 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_STRUCT
) |
2339 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
2340 ptr
+= TLV_HDR_SIZE
;
2342 for (i
= 0; i
< num_send_chans
; ++i
) {
2344 memset(chan_info
, 0, sizeof(*chan_info
));
2345 len
= sizeof(*chan_info
);
2346 chan_info
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
2348 FIELD_PREP(WMI_TLV_LEN
,
2349 len
- TLV_HDR_SIZE
);
2351 reg1
= &chan_info
->reg_info_1
;
2352 reg2
= &chan_info
->reg_info_2
;
2353 chan_info
->mhz
= tchan_info
->mhz
;
2354 chan_info
->band_center_freq1
= tchan_info
->cfreq1
;
2355 chan_info
->band_center_freq2
= tchan_info
->cfreq2
;
2357 if (tchan_info
->is_chan_passive
)
2358 chan_info
->info
|= WMI_CHAN_INFO_PASSIVE
;
2359 if (tchan_info
->allow_he
)
2360 chan_info
->info
|= WMI_CHAN_INFO_ALLOW_HE
;
2361 else if (tchan_info
->allow_vht
)
2362 chan_info
->info
|= WMI_CHAN_INFO_ALLOW_VHT
;
2363 else if (tchan_info
->allow_ht
)
2364 chan_info
->info
|= WMI_CHAN_INFO_ALLOW_HT
;
2365 if (tchan_info
->half_rate
)
2366 chan_info
->info
|= WMI_CHAN_INFO_HALF_RATE
;
2367 if (tchan_info
->quarter_rate
)
2368 chan_info
->info
|= WMI_CHAN_INFO_QUARTER_RATE
;
2369 if (tchan_info
->psc_channel
)
2370 chan_info
->info
|= WMI_CHAN_INFO_PSC
;
2372 chan_info
->info
|= FIELD_PREP(WMI_CHAN_INFO_MODE
,
2373 tchan_info
->phy_mode
);
2374 *reg1
|= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR
,
2375 tchan_info
->minpower
);
2376 *reg1
|= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR
,
2377 tchan_info
->maxpower
);
2378 *reg1
|= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR
,
2379 tchan_info
->maxregpower
);
2380 *reg1
|= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS
,
2381 tchan_info
->reg_class_id
);
2382 *reg2
|= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX
,
2383 tchan_info
->antennamax
);
2385 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
2386 "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
2387 i
, chan_info
->mhz
, chan_info
->info
);
2389 ptr
+= sizeof(*chan_info
);
2394 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_SCAN_CHAN_LIST_CMDID
);
2396 ath11k_warn(ar
->ab
, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
2407 int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k
*ar
, u32 vdev_id
,
2408 struct wmi_wmm_params_all_arg
*param
)
2410 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2411 struct wmi_vdev_set_wmm_params_cmd
*cmd
;
2412 struct wmi_wmm_params
*wmm_param
;
2413 struct wmi_wmm_params_arg
*wmi_wmm_arg
;
2414 struct sk_buff
*skb
;
2417 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
2421 cmd
= (struct wmi_vdev_set_wmm_params_cmd
*)skb
->data
;
2422 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
2423 WMI_TAG_VDEV_SET_WMM_PARAMS_CMD
) |
2424 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2426 cmd
->vdev_id
= vdev_id
;
2427 cmd
->wmm_param_type
= 0;
2429 for (ac
= 0; ac
< WME_NUM_AC
; ac
++) {
2432 wmi_wmm_arg
= ¶m
->ac_be
;
2435 wmi_wmm_arg
= ¶m
->ac_bk
;
2438 wmi_wmm_arg
= ¶m
->ac_vi
;
2441 wmi_wmm_arg
= ¶m
->ac_vo
;
2445 wmm_param
= (struct wmi_wmm_params
*)&cmd
->wmm_params
[ac
];
2446 wmm_param
->tlv_header
=
2447 FIELD_PREP(WMI_TLV_TAG
,
2448 WMI_TAG_VDEV_SET_WMM_PARAMS_CMD
) |
2449 FIELD_PREP(WMI_TLV_LEN
,
2450 sizeof(*wmm_param
) - TLV_HDR_SIZE
);
2452 wmm_param
->aifs
= wmi_wmm_arg
->aifs
;
2453 wmm_param
->cwmin
= wmi_wmm_arg
->cwmin
;
2454 wmm_param
->cwmax
= wmi_wmm_arg
->cwmax
;
2455 wmm_param
->txoplimit
= wmi_wmm_arg
->txop
;
2456 wmm_param
->acm
= wmi_wmm_arg
->acm
;
2457 wmm_param
->no_ack
= wmi_wmm_arg
->no_ack
;
2459 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
2460 "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
2461 ac
, wmm_param
->aifs
, wmm_param
->cwmin
,
2462 wmm_param
->cwmax
, wmm_param
->txoplimit
,
2463 wmm_param
->acm
, wmm_param
->no_ack
);
2465 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2466 WMI_VDEV_SET_WMM_PARAMS_CMDID
);
2469 "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID");
2476 int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k
*ar
,
2479 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2480 struct wmi_dfs_phyerr_offload_cmd
*cmd
;
2481 struct sk_buff
*skb
;
2484 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
2488 cmd
= (struct wmi_dfs_phyerr_offload_cmd
*)skb
->data
;
2490 FIELD_PREP(WMI_TLV_TAG
,
2491 WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD
) |
2492 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2494 cmd
->pdev_id
= pdev_id
;
2496 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
2497 "WMI dfs phy err offload enable pdev id %d\n", pdev_id
);
2499 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2500 WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID
);
2503 "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
2510 int ath11k_wmi_delba_send(struct ath11k
*ar
, u32 vdev_id
, const u8
*mac
,
2511 u32 tid
, u32 initiator
, u32 reason
)
2513 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2514 struct wmi_delba_send_cmd
*cmd
;
2515 struct sk_buff
*skb
;
2518 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
2522 cmd
= (struct wmi_delba_send_cmd
*)skb
->data
;
2523 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_DELBA_SEND_CMD
) |
2524 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2525 cmd
->vdev_id
= vdev_id
;
2526 ether_addr_copy(cmd
->peer_macaddr
.addr
, mac
);
2528 cmd
->initiator
= initiator
;
2529 cmd
->reasoncode
= reason
;
2531 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
2532 "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
2533 vdev_id
, mac
, tid
, initiator
, reason
);
2535 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_DELBA_SEND_CMDID
);
2539 "failed to send WMI_DELBA_SEND_CMDID cmd\n");
2546 int ath11k_wmi_addba_set_resp(struct ath11k
*ar
, u32 vdev_id
, const u8
*mac
,
2547 u32 tid
, u32 status
)
2549 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2550 struct wmi_addba_setresponse_cmd
*cmd
;
2551 struct sk_buff
*skb
;
2554 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
2558 cmd
= (struct wmi_addba_setresponse_cmd
*)skb
->data
;
2560 FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ADDBA_SETRESPONSE_CMD
) |
2561 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2562 cmd
->vdev_id
= vdev_id
;
2563 ether_addr_copy(cmd
->peer_macaddr
.addr
, mac
);
2565 cmd
->statuscode
= status
;
2567 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
2568 "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
2569 vdev_id
, mac
, tid
, status
);
2571 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_ADDBA_SET_RESP_CMDID
);
2575 "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
2582 int ath11k_wmi_addba_send(struct ath11k
*ar
, u32 vdev_id
, const u8
*mac
,
2583 u32 tid
, u32 buf_size
)
2585 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2586 struct wmi_addba_send_cmd
*cmd
;
2587 struct sk_buff
*skb
;
2590 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
2594 cmd
= (struct wmi_addba_send_cmd
*)skb
->data
;
2595 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ADDBA_SEND_CMD
) |
2596 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2597 cmd
->vdev_id
= vdev_id
;
2598 ether_addr_copy(cmd
->peer_macaddr
.addr
, mac
);
2600 cmd
->buffersize
= buf_size
;
2602 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
2603 "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
2604 vdev_id
, mac
, tid
, buf_size
);
2606 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_ADDBA_SEND_CMDID
);
2610 "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
2617 int ath11k_wmi_addba_clear_resp(struct ath11k
*ar
, u32 vdev_id
, const u8
*mac
)
2619 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2620 struct wmi_addba_clear_resp_cmd
*cmd
;
2621 struct sk_buff
*skb
;
2624 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
2628 cmd
= (struct wmi_addba_clear_resp_cmd
*)skb
->data
;
2630 FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ADDBA_CLEAR_RESP_CMD
) |
2631 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2632 cmd
->vdev_id
= vdev_id
;
2633 ether_addr_copy(cmd
->peer_macaddr
.addr
, mac
);
2635 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
2636 "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
2639 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_ADDBA_CLEAR_RESP_CMDID
);
2643 "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
2650 int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k
*ar
, u8
*addr
, u8 enable
)
2652 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2653 struct wmi_pdev_pktlog_filter_cmd
*cmd
;
2654 struct wmi_pdev_pktlog_filter_info
*info
;
2655 struct sk_buff
*skb
;
2656 struct wmi_tlv
*tlv
;
2660 len
= sizeof(*cmd
) + sizeof(*info
) + TLV_HDR_SIZE
;
2661 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
2665 cmd
= (struct wmi_pdev_pktlog_filter_cmd
*)skb
->data
;
2667 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD
) |
2668 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2670 cmd
->pdev_id
= DP_HW2SW_MACID(ar
->pdev
->pdev_id
);
2672 cmd
->enable
= enable
;
2674 ptr
= skb
->data
+ sizeof(*cmd
);
2677 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_STRUCT
) |
2678 FIELD_PREP(WMI_TLV_LEN
, sizeof(*info
));
2680 ptr
+= TLV_HDR_SIZE
;
2683 ether_addr_copy(info
->peer_macaddr
.addr
, addr
);
2684 info
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO
) |
2685 FIELD_PREP(WMI_TLV_LEN
,
2686 sizeof(*info
) - TLV_HDR_SIZE
);
2688 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2689 WMI_PDEV_PKTLOG_FILTER_CMDID
);
2691 ath11k_warn(ar
->ab
, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
2699 ath11k_wmi_send_init_country_cmd(struct ath11k
*ar
,
2700 struct wmi_init_country_params init_cc_params
)
2702 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2703 struct wmi_init_country_cmd
*cmd
;
2704 struct sk_buff
*skb
;
2707 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
2711 cmd
= (struct wmi_init_country_cmd
*)skb
->data
;
2713 FIELD_PREP(WMI_TLV_TAG
,
2714 WMI_TAG_SET_INIT_COUNTRY_CMD
) |
2715 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2717 cmd
->pdev_id
= ar
->pdev
->pdev_id
;
2719 switch (init_cc_params
.flags
) {
2721 cmd
->init_cc_type
= WMI_COUNTRY_INFO_TYPE_ALPHA
;
2722 memcpy((u8
*)&cmd
->cc_info
.alpha2
,
2723 init_cc_params
.cc_info
.alpha2
, 3);
2726 cmd
->init_cc_type
= WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE
;
2727 cmd
->cc_info
.country_code
= init_cc_params
.cc_info
.country_code
;
2730 cmd
->init_cc_type
= WMI_COUNTRY_INFO_TYPE_REGDOMAIN
;
2731 cmd
->cc_info
.regdom_id
= init_cc_params
.cc_info
.regdom_id
;
2738 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2739 WMI_SET_INIT_COUNTRY_CMDID
);
2744 "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n",
2753 ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k
*ar
,
2754 struct thermal_mitigation_params
*param
)
2756 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2757 struct wmi_therm_throt_config_request_cmd
*cmd
;
2758 struct wmi_therm_throt_level_config_info
*lvl_conf
;
2759 struct wmi_tlv
*tlv
;
2760 struct sk_buff
*skb
;
2763 len
= sizeof(*cmd
) + TLV_HDR_SIZE
+
2764 THERMAL_LEVELS
* sizeof(struct wmi_therm_throt_level_config_info
);
2766 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
2770 cmd
= (struct wmi_therm_throt_config_request_cmd
*)skb
->data
;
2772 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_THERM_THROT_CONFIG_REQUEST
) |
2773 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2775 cmd
->pdev_id
= ar
->pdev
->pdev_id
;
2776 cmd
->enable
= param
->enable
;
2777 cmd
->dc
= param
->dc
;
2778 cmd
->dc_per_event
= param
->dc_per_event
;
2779 cmd
->therm_throt_levels
= THERMAL_LEVELS
;
2781 tlv
= (struct wmi_tlv
*)(skb
->data
+ sizeof(*cmd
));
2782 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_STRUCT
) |
2783 FIELD_PREP(WMI_TLV_LEN
,
2785 sizeof(struct wmi_therm_throt_level_config_info
)));
2787 lvl_conf
= (struct wmi_therm_throt_level_config_info
*)(skb
->data
+
2790 for (i
= 0; i
< THERMAL_LEVELS
; i
++) {
2791 lvl_conf
->tlv_header
=
2792 FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO
) |
2793 FIELD_PREP(WMI_TLV_LEN
, sizeof(*lvl_conf
) - TLV_HDR_SIZE
);
2795 lvl_conf
->temp_lwm
= param
->levelconf
[i
].tmplwm
;
2796 lvl_conf
->temp_hwm
= param
->levelconf
[i
].tmphwm
;
2797 lvl_conf
->dc_off_percent
= param
->levelconf
[i
].dcoffpercent
;
2798 lvl_conf
->prio
= param
->levelconf
[i
].priority
;
2802 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_THERM_THROT_SET_CONF_CMDID
);
2804 ath11k_warn(ar
->ab
, "failed to send THERM_THROT_SET_CONF cmd\n");
2808 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
2809 "WMI vdev set thermal throt pdev_id %d enable %d dc %d dc_per_event %x levels %d\n",
2810 ar
->pdev
->pdev_id
, param
->enable
, param
->dc
,
2811 param
->dc_per_event
, THERMAL_LEVELS
);
2816 int ath11k_wmi_pdev_pktlog_enable(struct ath11k
*ar
, u32 pktlog_filter
)
2818 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2819 struct wmi_pktlog_enable_cmd
*cmd
;
2820 struct sk_buff
*skb
;
2823 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
2827 cmd
= (struct wmi_pktlog_enable_cmd
*)skb
->data
;
2829 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PDEV_PKTLOG_ENABLE_CMD
) |
2830 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2832 cmd
->pdev_id
= DP_HW2SW_MACID(ar
->pdev
->pdev_id
);
2833 cmd
->evlist
= pktlog_filter
;
2834 cmd
->enable
= ATH11K_WMI_PKTLOG_ENABLE_FORCE
;
2836 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2837 WMI_PDEV_PKTLOG_ENABLE_CMDID
);
2839 ath11k_warn(ar
->ab
, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
2846 int ath11k_wmi_pdev_pktlog_disable(struct ath11k
*ar
)
2848 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2849 struct wmi_pktlog_disable_cmd
*cmd
;
2850 struct sk_buff
*skb
;
2853 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
2857 cmd
= (struct wmi_pktlog_disable_cmd
*)skb
->data
;
2859 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PDEV_PKTLOG_DISABLE_CMD
) |
2860 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2862 cmd
->pdev_id
= DP_HW2SW_MACID(ar
->pdev
->pdev_id
);
2864 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2865 WMI_PDEV_PKTLOG_DISABLE_CMDID
);
2867 ath11k_warn(ar
->ab
, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
2875 ath11k_wmi_send_twt_enable_cmd(struct ath11k
*ar
, u32 pdev_id
)
2877 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2878 struct ath11k_base
*ab
= wmi
->wmi_ab
->ab
;
2879 struct wmi_twt_enable_params_cmd
*cmd
;
2880 struct sk_buff
*skb
;
2885 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
2889 cmd
= (struct wmi_twt_enable_params_cmd
*)skb
->data
;
2890 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_TWT_ENABLE_CMD
) |
2891 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
2892 cmd
->pdev_id
= pdev_id
;
2893 cmd
->sta_cong_timer_ms
= ATH11K_TWT_DEF_STA_CONG_TIMER_MS
;
2894 cmd
->default_slot_size
= ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE
;
2895 cmd
->congestion_thresh_setup
= ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP
;
2896 cmd
->congestion_thresh_teardown
=
2897 ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN
;
2898 cmd
->congestion_thresh_critical
=
2899 ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL
;
2900 cmd
->interference_thresh_teardown
=
2901 ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN
;
2902 cmd
->interference_thresh_setup
=
2903 ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP
;
2904 cmd
->min_no_sta_setup
= ATH11K_TWT_DEF_MIN_NO_STA_SETUP
;
2905 cmd
->min_no_sta_teardown
= ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN
;
2906 cmd
->no_of_bcast_mcast_slots
= ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS
;
2907 cmd
->min_no_twt_slots
= ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS
;
2908 cmd
->max_no_sta_twt
= ATH11K_TWT_DEF_MAX_NO_STA_TWT
;
2909 cmd
->mode_check_interval
= ATH11K_TWT_DEF_MODE_CHECK_INTERVAL
;
2910 cmd
->add_sta_slot_interval
= ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL
;
2911 cmd
->remove_sta_slot_interval
=
2912 ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL
;
2913 /* TODO add MBSSID support */
2914 cmd
->mbss_support
= 0;
2916 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2917 WMI_TWT_ENABLE_CMDID
);
2919 ath11k_warn(ab
, "Failed to send WMI_TWT_ENABLE_CMDID");
2926 ath11k_wmi_send_twt_disable_cmd(struct ath11k
*ar
, u32 pdev_id
)
2928 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2929 struct ath11k_base
*ab
= wmi
->wmi_ab
->ab
;
2930 struct wmi_twt_disable_params_cmd
*cmd
;
2931 struct sk_buff
*skb
;
2936 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
2940 cmd
= (struct wmi_twt_disable_params_cmd
*)skb
->data
;
2941 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_TWT_DISABLE_CMD
) |
2942 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
2943 cmd
->pdev_id
= pdev_id
;
2945 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2946 WMI_TWT_DISABLE_CMDID
);
2948 ath11k_warn(ab
, "Failed to send WMI_TWT_DISABLE_CMDID");
2955 ath11k_wmi_send_obss_spr_cmd(struct ath11k
*ar
, u32 vdev_id
,
2956 struct ieee80211_he_obss_pd
*he_obss_pd
)
2958 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2959 struct ath11k_base
*ab
= wmi
->wmi_ab
->ab
;
2960 struct wmi_obss_spatial_reuse_params_cmd
*cmd
;
2961 struct sk_buff
*skb
;
2966 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
2970 cmd
= (struct wmi_obss_spatial_reuse_params_cmd
*)skb
->data
;
2971 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
2972 WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD
) |
2973 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
2974 cmd
->vdev_id
= vdev_id
;
2975 cmd
->enable
= he_obss_pd
->enable
;
2976 cmd
->obss_min
= he_obss_pd
->min_offset
;
2977 cmd
->obss_max
= he_obss_pd
->max_offset
;
2979 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2980 WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID
);
2983 "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID");
2990 ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k
*ar
, u32 vdev_id
,
2991 u8 bss_color
, u32 period
,
2994 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2995 struct ath11k_base
*ab
= wmi
->wmi_ab
->ab
;
2996 struct wmi_obss_color_collision_cfg_params_cmd
*cmd
;
2997 struct sk_buff
*skb
;
3002 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
3006 cmd
= (struct wmi_obss_color_collision_cfg_params_cmd
*)skb
->data
;
3007 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
3008 WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG
) |
3009 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
3010 cmd
->vdev_id
= vdev_id
;
3011 cmd
->evt_type
= enable
? ATH11K_OBSS_COLOR_COLLISION_DETECTION
:
3012 ATH11K_OBSS_COLOR_COLLISION_DETECTION_DISABLE
;
3013 cmd
->current_bss_color
= bss_color
;
3014 cmd
->detection_period_ms
= period
;
3015 cmd
->scan_period_ms
= ATH11K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS
;
3016 cmd
->free_slot_expiry_time_ms
= 0;
3019 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
3020 "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
3021 cmd
->vdev_id
, cmd
->evt_type
, cmd
->current_bss_color
,
3022 cmd
->detection_period_ms
, cmd
->scan_period_ms
);
3024 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
3025 WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID
);
3027 ath11k_warn(ab
, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID");
3033 int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k
*ar
, u32 vdev_id
,
3036 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
3037 struct ath11k_base
*ab
= wmi
->wmi_ab
->ab
;
3038 struct wmi_bss_color_change_enable_params_cmd
*cmd
;
3039 struct sk_buff
*skb
;
3044 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
3048 cmd
= (struct wmi_bss_color_change_enable_params_cmd
*)skb
->data
;
3049 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_BSS_COLOR_CHANGE_ENABLE
) |
3050 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
3051 cmd
->vdev_id
= vdev_id
;
3052 cmd
->enable
= enable
? 1 : 0;
3054 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
3055 "wmi_send_bss_color_change_enable id %d enable %d\n",
3056 cmd
->vdev_id
, cmd
->enable
);
3058 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
3059 WMI_BSS_COLOR_CHANGE_ENABLE_CMDID
);
3061 ath11k_warn(ab
, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID");
3068 ath11k_fill_band_to_mac_param(struct ath11k_base
*soc
,
3069 struct wmi_host_pdev_band_to_mac
*band_to_mac
)
3072 struct ath11k_hal_reg_capabilities_ext
*hal_reg_cap
;
3073 struct ath11k_pdev
*pdev
;
3075 for (i
= 0; i
< soc
->num_radios
; i
++) {
3076 pdev
= &soc
->pdevs
[i
];
3077 hal_reg_cap
= &soc
->hal_reg_cap
[i
];
3078 band_to_mac
[i
].pdev_id
= pdev
->pdev_id
;
3080 switch (pdev
->cap
.supported_bands
) {
3081 case WMI_HOST_WLAN_2G_5G_CAP
:
3082 band_to_mac
[i
].start_freq
= hal_reg_cap
->low_2ghz_chan
;
3083 band_to_mac
[i
].end_freq
= hal_reg_cap
->high_5ghz_chan
;
3085 case WMI_HOST_WLAN_2G_CAP
:
3086 band_to_mac
[i
].start_freq
= hal_reg_cap
->low_2ghz_chan
;
3087 band_to_mac
[i
].end_freq
= hal_reg_cap
->high_2ghz_chan
;
3089 case WMI_HOST_WLAN_5G_CAP
:
3090 band_to_mac
[i
].start_freq
= hal_reg_cap
->low_5ghz_chan
;
3091 band_to_mac
[i
].end_freq
= hal_reg_cap
->high_5ghz_chan
;
3100 ath11k_wmi_copy_resource_config(struct wmi_resource_config
*wmi_cfg
,
3101 struct target_resource_config
*tg_cfg
)
3103 wmi_cfg
->num_vdevs
= tg_cfg
->num_vdevs
;
3104 wmi_cfg
->num_peers
= tg_cfg
->num_peers
;
3105 wmi_cfg
->num_offload_peers
= tg_cfg
->num_offload_peers
;
3106 wmi_cfg
->num_offload_reorder_buffs
= tg_cfg
->num_offload_reorder_buffs
;
3107 wmi_cfg
->num_peer_keys
= tg_cfg
->num_peer_keys
;
3108 wmi_cfg
->num_tids
= tg_cfg
->num_tids
;
3109 wmi_cfg
->ast_skid_limit
= tg_cfg
->ast_skid_limit
;
3110 wmi_cfg
->tx_chain_mask
= tg_cfg
->tx_chain_mask
;
3111 wmi_cfg
->rx_chain_mask
= tg_cfg
->rx_chain_mask
;
3112 wmi_cfg
->rx_timeout_pri
[0] = tg_cfg
->rx_timeout_pri
[0];
3113 wmi_cfg
->rx_timeout_pri
[1] = tg_cfg
->rx_timeout_pri
[1];
3114 wmi_cfg
->rx_timeout_pri
[2] = tg_cfg
->rx_timeout_pri
[2];
3115 wmi_cfg
->rx_timeout_pri
[3] = tg_cfg
->rx_timeout_pri
[3];
3116 wmi_cfg
->rx_decap_mode
= tg_cfg
->rx_decap_mode
;
3117 wmi_cfg
->scan_max_pending_req
= tg_cfg
->scan_max_pending_req
;
3118 wmi_cfg
->bmiss_offload_max_vdev
= tg_cfg
->bmiss_offload_max_vdev
;
3119 wmi_cfg
->roam_offload_max_vdev
= tg_cfg
->roam_offload_max_vdev
;
3120 wmi_cfg
->roam_offload_max_ap_profiles
=
3121 tg_cfg
->roam_offload_max_ap_profiles
;
3122 wmi_cfg
->num_mcast_groups
= tg_cfg
->num_mcast_groups
;
3123 wmi_cfg
->num_mcast_table_elems
= tg_cfg
->num_mcast_table_elems
;
3124 wmi_cfg
->mcast2ucast_mode
= tg_cfg
->mcast2ucast_mode
;
3125 wmi_cfg
->tx_dbg_log_size
= tg_cfg
->tx_dbg_log_size
;
3126 wmi_cfg
->num_wds_entries
= tg_cfg
->num_wds_entries
;
3127 wmi_cfg
->dma_burst_size
= tg_cfg
->dma_burst_size
;
3128 wmi_cfg
->mac_aggr_delim
= tg_cfg
->mac_aggr_delim
;
3129 wmi_cfg
->rx_skip_defrag_timeout_dup_detection_check
=
3130 tg_cfg
->rx_skip_defrag_timeout_dup_detection_check
;
3131 wmi_cfg
->vow_config
= tg_cfg
->vow_config
;
3132 wmi_cfg
->gtk_offload_max_vdev
= tg_cfg
->gtk_offload_max_vdev
;
3133 wmi_cfg
->num_msdu_desc
= tg_cfg
->num_msdu_desc
;
3134 wmi_cfg
->max_frag_entries
= tg_cfg
->max_frag_entries
;
3135 wmi_cfg
->num_tdls_vdevs
= tg_cfg
->num_tdls_vdevs
;
3136 wmi_cfg
->num_tdls_conn_table_entries
=
3137 tg_cfg
->num_tdls_conn_table_entries
;
3138 wmi_cfg
->beacon_tx_offload_max_vdev
=
3139 tg_cfg
->beacon_tx_offload_max_vdev
;
3140 wmi_cfg
->num_multicast_filter_entries
=
3141 tg_cfg
->num_multicast_filter_entries
;
3142 wmi_cfg
->num_wow_filters
= tg_cfg
->num_wow_filters
;
3143 wmi_cfg
->num_keep_alive_pattern
= tg_cfg
->num_keep_alive_pattern
;
3144 wmi_cfg
->keep_alive_pattern_size
= tg_cfg
->keep_alive_pattern_size
;
3145 wmi_cfg
->max_tdls_concurrent_sleep_sta
=
3146 tg_cfg
->max_tdls_concurrent_sleep_sta
;
3147 wmi_cfg
->max_tdls_concurrent_buffer_sta
=
3148 tg_cfg
->max_tdls_concurrent_buffer_sta
;
3149 wmi_cfg
->wmi_send_separate
= tg_cfg
->wmi_send_separate
;
3150 wmi_cfg
->num_ocb_vdevs
= tg_cfg
->num_ocb_vdevs
;
3151 wmi_cfg
->num_ocb_channels
= tg_cfg
->num_ocb_channels
;
3152 wmi_cfg
->num_ocb_schedules
= tg_cfg
->num_ocb_schedules
;
3153 wmi_cfg
->bpf_instruction_size
= tg_cfg
->bpf_instruction_size
;
3154 wmi_cfg
->max_bssid_rx_filters
= tg_cfg
->max_bssid_rx_filters
;
3155 wmi_cfg
->use_pdev_id
= tg_cfg
->use_pdev_id
;
3156 wmi_cfg
->flag1
= tg_cfg
->atf_config
;
3157 wmi_cfg
->peer_map_unmap_v2_support
= tg_cfg
->peer_map_unmap_v2_support
;
3158 wmi_cfg
->sched_params
= tg_cfg
->sched_params
;
3159 wmi_cfg
->twt_ap_pdev_count
= tg_cfg
->twt_ap_pdev_count
;
3160 wmi_cfg
->twt_ap_sta_count
= tg_cfg
->twt_ap_sta_count
;
3163 static int ath11k_init_cmd_send(struct ath11k_pdev_wmi
*wmi
,
3164 struct wmi_init_cmd_param
*param
)
3166 struct ath11k_base
*ab
= wmi
->wmi_ab
->ab
;
3167 struct sk_buff
*skb
;
3168 struct wmi_init_cmd
*cmd
;
3169 struct wmi_resource_config
*cfg
;
3170 struct wmi_pdev_set_hw_mode_cmd_param
*hw_mode
;
3171 struct wmi_pdev_band_to_mac
*band_to_mac
;
3172 struct wlan_host_mem_chunk
*host_mem_chunks
;
3173 struct wmi_tlv
*tlv
;
3176 u32 hw_mode_len
= 0;
3179 if (param
->hw_mode_id
!= WMI_HOST_HW_MODE_MAX
)
3180 hw_mode_len
= sizeof(*hw_mode
) + TLV_HDR_SIZE
+
3181 (param
->num_band_to_mac
* sizeof(*band_to_mac
));
3183 len
= sizeof(*cmd
) + TLV_HDR_SIZE
+ sizeof(*cfg
) + hw_mode_len
+
3184 (param
->num_mem_chunks
? (sizeof(*host_mem_chunks
) * WMI_MAX_MEM_REQS
) : 0);
3186 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
3190 cmd
= (struct wmi_init_cmd
*)skb
->data
;
3192 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_INIT_CMD
) |
3193 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
3195 ptr
= skb
->data
+ sizeof(*cmd
);
3198 ath11k_wmi_copy_resource_config(cfg
, param
->res_cfg
);
3200 cfg
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_RESOURCE_CONFIG
) |
3201 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cfg
) - TLV_HDR_SIZE
);
3203 ptr
+= sizeof(*cfg
);
3204 host_mem_chunks
= ptr
+ TLV_HDR_SIZE
;
3205 len
= sizeof(struct wlan_host_mem_chunk
);
3207 for (idx
= 0; idx
< param
->num_mem_chunks
; ++idx
) {
3208 host_mem_chunks
[idx
].tlv_header
=
3209 FIELD_PREP(WMI_TLV_TAG
,
3210 WMI_TAG_WLAN_HOST_MEMORY_CHUNK
) |
3211 FIELD_PREP(WMI_TLV_LEN
, len
);
3213 host_mem_chunks
[idx
].ptr
= param
->mem_chunks
[idx
].paddr
;
3214 host_mem_chunks
[idx
].size
= param
->mem_chunks
[idx
].len
;
3215 host_mem_chunks
[idx
].req_id
= param
->mem_chunks
[idx
].req_id
;
3217 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
3218 "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
3219 param
->mem_chunks
[idx
].req_id
,
3220 (u64
)param
->mem_chunks
[idx
].paddr
,
3221 param
->mem_chunks
[idx
].len
);
3223 cmd
->num_host_mem_chunks
= param
->num_mem_chunks
;
3224 len
= sizeof(struct wlan_host_mem_chunk
) * param
->num_mem_chunks
;
3226 /* num_mem_chunks is zero */
3228 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_STRUCT
) |
3229 FIELD_PREP(WMI_TLV_LEN
, len
);
3230 ptr
+= TLV_HDR_SIZE
+ len
;
3232 if (param
->hw_mode_id
!= WMI_HOST_HW_MODE_MAX
) {
3233 hw_mode
= (struct wmi_pdev_set_hw_mode_cmd_param
*)ptr
;
3234 hw_mode
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
3235 WMI_TAG_PDEV_SET_HW_MODE_CMD
) |
3236 FIELD_PREP(WMI_TLV_LEN
,
3237 sizeof(*hw_mode
) - TLV_HDR_SIZE
);
3239 hw_mode
->hw_mode_index
= param
->hw_mode_id
;
3240 hw_mode
->num_band_to_mac
= param
->num_band_to_mac
;
3242 ptr
+= sizeof(*hw_mode
);
3244 len
= param
->num_band_to_mac
* sizeof(*band_to_mac
);
3246 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_STRUCT
) |
3247 FIELD_PREP(WMI_TLV_LEN
, len
);
3249 ptr
+= TLV_HDR_SIZE
;
3250 len
= sizeof(*band_to_mac
);
3252 for (idx
= 0; idx
< param
->num_band_to_mac
; idx
++) {
3253 band_to_mac
= (void *)ptr
;
3255 band_to_mac
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
3256 WMI_TAG_PDEV_BAND_TO_MAC
) |
3257 FIELD_PREP(WMI_TLV_LEN
,
3258 len
- TLV_HDR_SIZE
);
3259 band_to_mac
->pdev_id
= param
->band_to_mac
[idx
].pdev_id
;
3260 band_to_mac
->start_freq
=
3261 param
->band_to_mac
[idx
].start_freq
;
3262 band_to_mac
->end_freq
=
3263 param
->band_to_mac
[idx
].end_freq
;
3264 ptr
+= sizeof(*band_to_mac
);
3268 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_INIT_CMDID
);
3270 ath11k_warn(ab
, "failed to send WMI_INIT_CMDID\n");
3277 int ath11k_wmi_pdev_lro_cfg(struct ath11k
*ar
,
3280 struct ath11k_wmi_pdev_lro_config_cmd
*cmd
;
3281 struct sk_buff
*skb
;
3284 skb
= ath11k_wmi_alloc_skb(ar
->wmi
->wmi_ab
, sizeof(*cmd
));
3288 cmd
= (struct ath11k_wmi_pdev_lro_config_cmd
*)skb
->data
;
3289 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_LRO_INFO_CMD
) |
3290 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
3292 get_random_bytes(cmd
->th_4
, sizeof(uint32_t) * ATH11K_IPV4_TH_SEED_SIZE
);
3293 get_random_bytes(cmd
->th_6
, sizeof(uint32_t) * ATH11K_IPV6_TH_SEED_SIZE
);
3295 cmd
->pdev_id
= pdev_id
;
3297 ret
= ath11k_wmi_cmd_send(ar
->wmi
, skb
, WMI_LRO_CONFIG_CMDID
);
3300 "failed to send lro cfg req wmi cmd\n");
3304 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
3305 "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id
);
3312 int ath11k_wmi_wait_for_service_ready(struct ath11k_base
*ab
)
3314 unsigned long time_left
;
3316 time_left
= wait_for_completion_timeout(&ab
->wmi_ab
.service_ready
,
3317 WMI_SERVICE_READY_TIMEOUT_HZ
);
3324 int ath11k_wmi_wait_for_unified_ready(struct ath11k_base
*ab
)
3326 unsigned long time_left
;
3328 time_left
= wait_for_completion_timeout(&ab
->wmi_ab
.unified_ready
,
3329 WMI_SERVICE_READY_TIMEOUT_HZ
);
3336 int ath11k_wmi_cmd_init(struct ath11k_base
*ab
)
3338 struct ath11k_wmi_base
*wmi_sc
= &ab
->wmi_ab
;
3339 struct wmi_init_cmd_param init_param
;
3340 struct target_resource_config config
;
3342 memset(&init_param
, 0, sizeof(init_param
));
3343 memset(&config
, 0, sizeof(config
));
3345 ab
->hw_params
.hw_ops
->wmi_init_config(ab
, &config
);
3347 memcpy(&wmi_sc
->wlan_resource_config
, &config
, sizeof(config
));
3349 init_param
.res_cfg
= &wmi_sc
->wlan_resource_config
;
3350 init_param
.num_mem_chunks
= wmi_sc
->num_mem_chunks
;
3351 init_param
.hw_mode_id
= wmi_sc
->preferred_hw_mode
;
3352 init_param
.mem_chunks
= wmi_sc
->mem_chunks
;
3354 if (wmi_sc
->preferred_hw_mode
== WMI_HOST_HW_MODE_SINGLE
)
3355 init_param
.hw_mode_id
= WMI_HOST_HW_MODE_MAX
;
3357 if (ab
->hw_params
.needs_band_to_mac
) {
3358 init_param
.num_band_to_mac
= ab
->num_radios
;
3359 ath11k_fill_band_to_mac_param(ab
, init_param
.band_to_mac
);
3362 return ath11k_init_cmd_send(&wmi_sc
->wmi
[0], &init_param
);
3365 int ath11k_wmi_vdev_spectral_conf(struct ath11k
*ar
,
3366 struct ath11k_wmi_vdev_spectral_conf_param
*param
)
3368 struct ath11k_wmi_vdev_spectral_conf_cmd
*cmd
;
3369 struct sk_buff
*skb
;
3372 skb
= ath11k_wmi_alloc_skb(ar
->wmi
->wmi_ab
, sizeof(*cmd
));
3376 cmd
= (struct ath11k_wmi_vdev_spectral_conf_cmd
*)skb
->data
;
3377 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
3378 WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD
) |
3379 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
3381 memcpy(&cmd
->param
, param
, sizeof(*param
));
3383 ret
= ath11k_wmi_cmd_send(ar
->wmi
, skb
,
3384 WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID
);
3387 "failed to send spectral scan config wmi cmd\n");
3391 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
3392 "WMI spectral scan config cmd vdev_id 0x%x\n",
3401 int ath11k_wmi_vdev_spectral_enable(struct ath11k
*ar
, u32 vdev_id
,
3402 u32 trigger
, u32 enable
)
3404 struct ath11k_wmi_vdev_spectral_enable_cmd
*cmd
;
3405 struct sk_buff
*skb
;
3408 skb
= ath11k_wmi_alloc_skb(ar
->wmi
->wmi_ab
, sizeof(*cmd
));
3412 cmd
= (struct ath11k_wmi_vdev_spectral_enable_cmd
*)skb
->data
;
3413 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
3414 WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD
) |
3415 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
3417 cmd
->vdev_id
= vdev_id
;
3418 cmd
->trigger_cmd
= trigger
;
3419 cmd
->enable_cmd
= enable
;
3421 ret
= ath11k_wmi_cmd_send(ar
->wmi
, skb
,
3422 WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID
);
3425 "failed to send spectral enable wmi cmd\n");
3429 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
3430 "WMI spectral enable cmd vdev id 0x%x\n",
3439 int ath11k_wmi_pdev_dma_ring_cfg(struct ath11k
*ar
,
3440 struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd
*param
)
3442 struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd
*cmd
;
3443 struct sk_buff
*skb
;
3446 skb
= ath11k_wmi_alloc_skb(ar
->wmi
->wmi_ab
, sizeof(*cmd
));
3450 cmd
= (struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd
*)skb
->data
;
3451 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_DMA_RING_CFG_REQ
) |
3452 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
3454 cmd
->pdev_id
= param
->pdev_id
;
3455 cmd
->module_id
= param
->module_id
;
3456 cmd
->base_paddr_lo
= param
->base_paddr_lo
;
3457 cmd
->base_paddr_hi
= param
->base_paddr_hi
;
3458 cmd
->head_idx_paddr_lo
= param
->head_idx_paddr_lo
;
3459 cmd
->head_idx_paddr_hi
= param
->head_idx_paddr_hi
;
3460 cmd
->tail_idx_paddr_lo
= param
->tail_idx_paddr_lo
;
3461 cmd
->tail_idx_paddr_hi
= param
->tail_idx_paddr_hi
;
3462 cmd
->num_elems
= param
->num_elems
;
3463 cmd
->buf_size
= param
->buf_size
;
3464 cmd
->num_resp_per_event
= param
->num_resp_per_event
;
3465 cmd
->event_timeout_ms
= param
->event_timeout_ms
;
3467 ret
= ath11k_wmi_cmd_send(ar
->wmi
, skb
,
3468 WMI_PDEV_DMA_RING_CFG_REQ_CMDID
);
3471 "failed to send dma ring cfg req wmi cmd\n");
3475 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
3476 "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
3485 static int ath11k_wmi_tlv_dma_buf_entry_parse(struct ath11k_base
*soc
,
3487 const void *ptr
, void *data
)
3489 struct wmi_tlv_dma_buf_release_parse
*parse
= data
;
3491 if (tag
!= WMI_TAG_DMA_BUF_RELEASE_ENTRY
)
3494 if (parse
->num_buf_entry
>= parse
->fixed
.num_buf_release_entry
)
3497 parse
->num_buf_entry
++;
3501 static int ath11k_wmi_tlv_dma_buf_meta_parse(struct ath11k_base
*soc
,
3503 const void *ptr
, void *data
)
3505 struct wmi_tlv_dma_buf_release_parse
*parse
= data
;
3507 if (tag
!= WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA
)
3510 if (parse
->num_meta
>= parse
->fixed
.num_meta_data_entry
)
3517 static int ath11k_wmi_tlv_dma_buf_parse(struct ath11k_base
*ab
,
3519 const void *ptr
, void *data
)
3521 struct wmi_tlv_dma_buf_release_parse
*parse
= data
;
3525 case WMI_TAG_DMA_BUF_RELEASE
:
3526 memcpy(&parse
->fixed
, ptr
,
3527 sizeof(struct ath11k_wmi_dma_buf_release_fixed_param
));
3528 parse
->fixed
.pdev_id
= DP_HW2SW_MACID(parse
->fixed
.pdev_id
);
3530 case WMI_TAG_ARRAY_STRUCT
:
3531 if (!parse
->buf_entry_done
) {
3532 parse
->num_buf_entry
= 0;
3533 parse
->buf_entry
= (struct wmi_dma_buf_release_entry
*)ptr
;
3535 ret
= ath11k_wmi_tlv_iter(ab
, ptr
, len
,
3536 ath11k_wmi_tlv_dma_buf_entry_parse
,
3539 ath11k_warn(ab
, "failed to parse dma buf entry tlv %d\n",
3544 parse
->buf_entry_done
= true;
3545 } else if (!parse
->meta_data_done
) {
3546 parse
->num_meta
= 0;
3547 parse
->meta_data
= (struct wmi_dma_buf_release_meta_data
*)ptr
;
3549 ret
= ath11k_wmi_tlv_iter(ab
, ptr
, len
,
3550 ath11k_wmi_tlv_dma_buf_meta_parse
,
3553 ath11k_warn(ab
, "failed to parse dma buf meta tlv %d\n",
3558 parse
->meta_data_done
= true;
3567 static void ath11k_wmi_pdev_dma_ring_buf_release_event(struct ath11k_base
*ab
,
3568 struct sk_buff
*skb
)
3570 struct wmi_tlv_dma_buf_release_parse parse
= { };
3571 struct ath11k_dbring_buf_release_event param
;
3574 ret
= ath11k_wmi_tlv_iter(ab
, skb
->data
, skb
->len
,
3575 ath11k_wmi_tlv_dma_buf_parse
,
3578 ath11k_warn(ab
, "failed to parse dma buf release tlv %d\n", ret
);
3582 param
.fixed
= parse
.fixed
;
3583 param
.buf_entry
= parse
.buf_entry
;
3584 param
.num_buf_entry
= parse
.num_buf_entry
;
3585 param
.meta_data
= parse
.meta_data
;
3586 param
.num_meta
= parse
.num_meta
;
3588 ret
= ath11k_dbring_buffer_release_event(ab
, ¶m
);
3590 ath11k_warn(ab
, "failed to handle dma buf release event %d\n", ret
);
3595 static int ath11k_wmi_tlv_hw_mode_caps_parse(struct ath11k_base
*soc
,
3597 const void *ptr
, void *data
)
3599 struct wmi_tlv_svc_rdy_ext_parse
*svc_rdy_ext
= data
;
3600 struct wmi_hw_mode_capabilities
*hw_mode_cap
;
3603 if (tag
!= WMI_TAG_HW_MODE_CAPABILITIES
)
3606 if (svc_rdy_ext
->n_hw_mode_caps
>= svc_rdy_ext
->param
.num_hw_modes
)
3609 hw_mode_cap
= container_of(ptr
, struct wmi_hw_mode_capabilities
,
3611 svc_rdy_ext
->n_hw_mode_caps
++;
3613 phy_map
= hw_mode_cap
->phy_id_map
;
3615 svc_rdy_ext
->tot_phy_id
++;
3616 phy_map
= phy_map
>> 1;
3622 static int ath11k_wmi_tlv_hw_mode_caps(struct ath11k_base
*soc
,
3623 u16 len
, const void *ptr
, void *data
)
3625 struct wmi_tlv_svc_rdy_ext_parse
*svc_rdy_ext
= data
;
3626 struct wmi_hw_mode_capabilities
*hw_mode_caps
;
3627 enum wmi_host_hw_mode_config_type mode
, pref
;
3631 svc_rdy_ext
->n_hw_mode_caps
= 0;
3632 svc_rdy_ext
->hw_mode_caps
= (struct wmi_hw_mode_capabilities
*)ptr
;
3634 ret
= ath11k_wmi_tlv_iter(soc
, ptr
, len
,
3635 ath11k_wmi_tlv_hw_mode_caps_parse
,
3638 ath11k_warn(soc
, "failed to parse tlv %d\n", ret
);
3643 while (i
< svc_rdy_ext
->n_hw_mode_caps
) {
3644 hw_mode_caps
= &svc_rdy_ext
->hw_mode_caps
[i
];
3645 mode
= hw_mode_caps
->hw_mode_id
;
3646 pref
= soc
->wmi_ab
.preferred_hw_mode
;
3648 if (ath11k_hw_mode_pri_map
[mode
] < ath11k_hw_mode_pri_map
[pref
]) {
3649 svc_rdy_ext
->pref_hw_mode_caps
= *hw_mode_caps
;
3650 soc
->wmi_ab
.preferred_hw_mode
= mode
;
3655 ath11k_dbg(soc
, ATH11K_DBG_WMI
, "preferred_hw_mode:%d\n",
3656 soc
->wmi_ab
.preferred_hw_mode
);
3657 if (soc
->wmi_ab
.preferred_hw_mode
== WMI_HOST_HW_MODE_MAX
)
3663 static int ath11k_wmi_tlv_mac_phy_caps_parse(struct ath11k_base
*soc
,
3665 const void *ptr
, void *data
)
3667 struct wmi_tlv_svc_rdy_ext_parse
*svc_rdy_ext
= data
;
3669 if (tag
!= WMI_TAG_MAC_PHY_CAPABILITIES
)
3672 if (svc_rdy_ext
->n_mac_phy_caps
>= svc_rdy_ext
->tot_phy_id
)
3675 len
= min_t(u16
, len
, sizeof(struct wmi_mac_phy_capabilities
));
3676 if (!svc_rdy_ext
->n_mac_phy_caps
) {
3677 svc_rdy_ext
->mac_phy_caps
= kzalloc((svc_rdy_ext
->tot_phy_id
) * len
,
3679 if (!svc_rdy_ext
->mac_phy_caps
)
3683 memcpy(svc_rdy_ext
->mac_phy_caps
+ svc_rdy_ext
->n_mac_phy_caps
, ptr
, len
);
3684 svc_rdy_ext
->n_mac_phy_caps
++;
3688 static int ath11k_wmi_tlv_ext_hal_reg_caps_parse(struct ath11k_base
*soc
,
3690 const void *ptr
, void *data
)
3692 struct wmi_tlv_svc_rdy_ext_parse
*svc_rdy_ext
= data
;
3694 if (tag
!= WMI_TAG_HAL_REG_CAPABILITIES_EXT
)
3697 if (svc_rdy_ext
->n_ext_hal_reg_caps
>= svc_rdy_ext
->param
.num_phy
)
3700 svc_rdy_ext
->n_ext_hal_reg_caps
++;
3704 static int ath11k_wmi_tlv_ext_hal_reg_caps(struct ath11k_base
*soc
,
3705 u16 len
, const void *ptr
, void *data
)
3707 struct ath11k_pdev_wmi
*wmi_handle
= &soc
->wmi_ab
.wmi
[0];
3708 struct wmi_tlv_svc_rdy_ext_parse
*svc_rdy_ext
= data
;
3709 struct ath11k_hal_reg_capabilities_ext reg_cap
;
3713 svc_rdy_ext
->n_ext_hal_reg_caps
= 0;
3714 svc_rdy_ext
->ext_hal_reg_caps
= (struct wmi_hal_reg_capabilities_ext
*)ptr
;
3715 ret
= ath11k_wmi_tlv_iter(soc
, ptr
, len
,
3716 ath11k_wmi_tlv_ext_hal_reg_caps_parse
,
3719 ath11k_warn(soc
, "failed to parse tlv %d\n", ret
);
3723 for (i
= 0; i
< svc_rdy_ext
->param
.num_phy
; i
++) {
3724 ret
= ath11k_pull_reg_cap_svc_rdy_ext(wmi_handle
,
3725 svc_rdy_ext
->soc_hal_reg_caps
,
3726 svc_rdy_ext
->ext_hal_reg_caps
, i
,
3729 ath11k_warn(soc
, "failed to extract reg cap %d\n", i
);
3733 memcpy(&soc
->hal_reg_cap
[reg_cap
.phy_id
],
3734 ®_cap
, sizeof(reg_cap
));
3739 static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base
*soc
,
3740 u16 len
, const void *ptr
,
3743 struct ath11k_pdev_wmi
*wmi_handle
= &soc
->wmi_ab
.wmi
[0];
3744 struct wmi_tlv_svc_rdy_ext_parse
*svc_rdy_ext
= data
;
3745 u8 hw_mode_id
= svc_rdy_ext
->pref_hw_mode_caps
.hw_mode_id
;
3750 svc_rdy_ext
->soc_hal_reg_caps
= (struct wmi_soc_hal_reg_capabilities
*)ptr
;
3751 svc_rdy_ext
->param
.num_phy
= svc_rdy_ext
->soc_hal_reg_caps
->num_phy
;
3753 soc
->num_radios
= 0;
3754 phy_id_map
= svc_rdy_ext
->pref_hw_mode_caps
.phy_id_map
;
3756 while (phy_id_map
&& soc
->num_radios
< MAX_RADIOS
) {
3757 ret
= ath11k_pull_mac_phy_cap_svc_ready_ext(wmi_handle
,
3758 svc_rdy_ext
->hw_caps
,
3759 svc_rdy_ext
->hw_mode_caps
,
3760 svc_rdy_ext
->soc_hal_reg_caps
,
3761 svc_rdy_ext
->mac_phy_caps
,
3762 hw_mode_id
, soc
->num_radios
,
3763 &soc
->pdevs
[pdev_index
]);
3765 ath11k_warn(soc
, "failed to extract mac caps, idx :%d\n",
3772 /* For QCA6390, save mac_phy capability in the same pdev */
3773 if (soc
->hw_params
.single_pdev_only
)
3776 pdev_index
= soc
->num_radios
;
3778 /* TODO: mac_phy_cap prints */
3782 /* For QCA6390, set num_radios to 1 because host manages
3783 * both 2G and 5G radio in one pdev.
3784 * Set pdev_id = 0 and 0 means soc level.
3786 if (soc
->hw_params
.single_pdev_only
) {
3787 soc
->num_radios
= 1;
3788 soc
->pdevs
[0].pdev_id
= 0;
3794 static int ath11k_wmi_tlv_dma_ring_caps_parse(struct ath11k_base
*soc
,
3796 const void *ptr
, void *data
)
3798 struct wmi_tlv_dma_ring_caps_parse
*parse
= data
;
3800 if (tag
!= WMI_TAG_DMA_RING_CAPABILITIES
)
3803 parse
->n_dma_ring_caps
++;
3807 static int ath11k_wmi_alloc_dbring_caps(struct ath11k_base
*ab
,
3813 sz
= num_cap
* sizeof(struct ath11k_dbring_cap
);
3814 ptr
= kzalloc(sz
, GFP_ATOMIC
);
3819 ab
->num_db_cap
= num_cap
;
3824 static void ath11k_wmi_free_dbring_caps(struct ath11k_base
*ab
)
3830 static int ath11k_wmi_tlv_dma_ring_caps(struct ath11k_base
*ab
,
3831 u16 len
, const void *ptr
, void *data
)
3833 struct wmi_tlv_dma_ring_caps_parse
*dma_caps_parse
= data
;
3834 struct wmi_dma_ring_capabilities
*dma_caps
;
3835 struct ath11k_dbring_cap
*dir_buff_caps
;
3839 dma_caps_parse
->n_dma_ring_caps
= 0;
3840 dma_caps
= (struct wmi_dma_ring_capabilities
*)ptr
;
3841 ret
= ath11k_wmi_tlv_iter(ab
, ptr
, len
,
3842 ath11k_wmi_tlv_dma_ring_caps_parse
,
3845 ath11k_warn(ab
, "failed to parse dma ring caps tlv %d\n", ret
);
3849 if (!dma_caps_parse
->n_dma_ring_caps
)
3852 if (ab
->num_db_cap
) {
3853 ath11k_warn(ab
, "Already processed, so ignoring dma ring caps\n");
3857 ret
= ath11k_wmi_alloc_dbring_caps(ab
, dma_caps_parse
->n_dma_ring_caps
);
3861 dir_buff_caps
= ab
->db_caps
;
3862 for (i
= 0; i
< dma_caps_parse
->n_dma_ring_caps
; i
++) {
3863 if (dma_caps
[i
].module_id
>= WMI_DIRECT_BUF_MAX
) {
3864 ath11k_warn(ab
, "Invalid module id %d\n", dma_caps
[i
].module_id
);
3869 dir_buff_caps
[i
].id
= dma_caps
[i
].module_id
;
3870 dir_buff_caps
[i
].pdev_id
= DP_HW2SW_MACID(dma_caps
[i
].pdev_id
);
3871 dir_buff_caps
[i
].min_elem
= dma_caps
[i
].min_elem
;
3872 dir_buff_caps
[i
].min_buf_sz
= dma_caps
[i
].min_buf_sz
;
3873 dir_buff_caps
[i
].min_buf_align
= dma_caps
[i
].min_buf_align
;
3879 ath11k_wmi_free_dbring_caps(ab
);
3883 static int ath11k_wmi_tlv_svc_rdy_ext_parse(struct ath11k_base
*ab
,
3885 const void *ptr
, void *data
)
3887 struct ath11k_pdev_wmi
*wmi_handle
= &ab
->wmi_ab
.wmi
[0];
3888 struct wmi_tlv_svc_rdy_ext_parse
*svc_rdy_ext
= data
;
3892 case WMI_TAG_SERVICE_READY_EXT_EVENT
:
3893 ret
= ath11k_pull_svc_ready_ext(wmi_handle
, ptr
,
3894 &svc_rdy_ext
->param
);
3896 ath11k_warn(ab
, "unable to extract ext params\n");
3901 case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS
:
3902 svc_rdy_ext
->hw_caps
= (struct wmi_soc_mac_phy_hw_mode_caps
*)ptr
;
3903 svc_rdy_ext
->param
.num_hw_modes
= svc_rdy_ext
->hw_caps
->num_hw_modes
;
3906 case WMI_TAG_SOC_HAL_REG_CAPABILITIES
:
3907 ret
= ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(ab
, len
, ptr
,
3913 case WMI_TAG_ARRAY_STRUCT
:
3914 if (!svc_rdy_ext
->hw_mode_done
) {
3915 ret
= ath11k_wmi_tlv_hw_mode_caps(ab
, len
, ptr
,
3920 svc_rdy_ext
->hw_mode_done
= true;
3921 } else if (!svc_rdy_ext
->mac_phy_done
) {
3922 svc_rdy_ext
->n_mac_phy_caps
= 0;
3923 ret
= ath11k_wmi_tlv_iter(ab
, ptr
, len
,
3924 ath11k_wmi_tlv_mac_phy_caps_parse
,
3927 ath11k_warn(ab
, "failed to parse tlv %d\n", ret
);
3931 svc_rdy_ext
->mac_phy_done
= true;
3932 } else if (!svc_rdy_ext
->ext_hal_reg_done
) {
3933 ret
= ath11k_wmi_tlv_ext_hal_reg_caps(ab
, len
, ptr
,
3938 svc_rdy_ext
->ext_hal_reg_done
= true;
3939 } else if (!svc_rdy_ext
->mac_phy_chainmask_combo_done
) {
3940 svc_rdy_ext
->mac_phy_chainmask_combo_done
= true;
3941 } else if (!svc_rdy_ext
->mac_phy_chainmask_cap_done
) {
3942 svc_rdy_ext
->mac_phy_chainmask_cap_done
= true;
3943 } else if (!svc_rdy_ext
->oem_dma_ring_cap_done
) {
3944 svc_rdy_ext
->oem_dma_ring_cap_done
= true;
3945 } else if (!svc_rdy_ext
->dma_ring_cap_done
) {
3946 ret
= ath11k_wmi_tlv_dma_ring_caps(ab
, len
, ptr
,
3947 &svc_rdy_ext
->dma_caps_parse
);
3951 svc_rdy_ext
->dma_ring_cap_done
= true;
3961 static int ath11k_service_ready_ext_event(struct ath11k_base
*ab
,
3962 struct sk_buff
*skb
)
3964 struct wmi_tlv_svc_rdy_ext_parse svc_rdy_ext
= { };
3967 ret
= ath11k_wmi_tlv_iter(ab
, skb
->data
, skb
->len
,
3968 ath11k_wmi_tlv_svc_rdy_ext_parse
,
3971 ath11k_warn(ab
, "failed to parse tlv %d\n", ret
);
3975 if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG
, ab
->wmi_ab
.svc_map
))
3976 complete(&ab
->wmi_ab
.service_ready
);
3978 kfree(svc_rdy_ext
.mac_phy_caps
);
3982 ath11k_wmi_free_dbring_caps(ab
);
3986 static int ath11k_wmi_tlv_svc_rdy_ext2_parse(struct ath11k_base
*ab
,
3988 const void *ptr
, void *data
)
3990 struct wmi_tlv_svc_rdy_ext2_parse
*parse
= data
;
3994 case WMI_TAG_ARRAY_STRUCT
:
3995 if (!parse
->dma_ring_cap_done
) {
3996 ret
= ath11k_wmi_tlv_dma_ring_caps(ab
, len
, ptr
,
3997 &parse
->dma_caps_parse
);
4001 parse
->dma_ring_cap_done
= true;
4011 static int ath11k_service_ready_ext2_event(struct ath11k_base
*ab
,
4012 struct sk_buff
*skb
)
4014 struct wmi_tlv_svc_rdy_ext2_parse svc_rdy_ext2
= { };
4017 ret
= ath11k_wmi_tlv_iter(ab
, skb
->data
, skb
->len
,
4018 ath11k_wmi_tlv_svc_rdy_ext2_parse
,
4021 ath11k_warn(ab
, "failed to parse ext2 event tlv %d\n", ret
);
4025 complete(&ab
->wmi_ab
.service_ready
);
4030 ath11k_wmi_free_dbring_caps(ab
);
4034 static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base
*ab
, struct sk_buff
*skb
,
4035 struct wmi_vdev_start_resp_event
*vdev_rsp
)
4038 const struct wmi_vdev_start_resp_event
*ev
;
4041 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
4044 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4048 ev
= tb
[WMI_TAG_VDEV_START_RESPONSE_EVENT
];
4050 ath11k_warn(ab
, "failed to fetch vdev start resp ev");
4055 memset(vdev_rsp
, 0, sizeof(*vdev_rsp
));
4057 vdev_rsp
->vdev_id
= ev
->vdev_id
;
4058 vdev_rsp
->requestor_id
= ev
->requestor_id
;
4059 vdev_rsp
->resp_type
= ev
->resp_type
;
4060 vdev_rsp
->status
= ev
->status
;
4061 vdev_rsp
->chain_mask
= ev
->chain_mask
;
4062 vdev_rsp
->smps_mode
= ev
->smps_mode
;
4063 vdev_rsp
->mac_id
= ev
->mac_id
;
4064 vdev_rsp
->cfgd_tx_streams
= ev
->cfgd_tx_streams
;
4065 vdev_rsp
->cfgd_rx_streams
= ev
->cfgd_rx_streams
;
4071 static struct cur_reg_rule
4072 *create_reg_rules_from_wmi(u32 num_reg_rules
,
4073 struct wmi_regulatory_rule_struct
*wmi_reg_rule
)
4075 struct cur_reg_rule
*reg_rule_ptr
;
4078 reg_rule_ptr
= kzalloc((num_reg_rules
* sizeof(*reg_rule_ptr
)),
4084 for (count
= 0; count
< num_reg_rules
; count
++) {
4085 reg_rule_ptr
[count
].start_freq
=
4086 FIELD_GET(REG_RULE_START_FREQ
,
4087 wmi_reg_rule
[count
].freq_info
);
4088 reg_rule_ptr
[count
].end_freq
=
4089 FIELD_GET(REG_RULE_END_FREQ
,
4090 wmi_reg_rule
[count
].freq_info
);
4091 reg_rule_ptr
[count
].max_bw
=
4092 FIELD_GET(REG_RULE_MAX_BW
,
4093 wmi_reg_rule
[count
].bw_pwr_info
);
4094 reg_rule_ptr
[count
].reg_power
=
4095 FIELD_GET(REG_RULE_REG_PWR
,
4096 wmi_reg_rule
[count
].bw_pwr_info
);
4097 reg_rule_ptr
[count
].ant_gain
=
4098 FIELD_GET(REG_RULE_ANT_GAIN
,
4099 wmi_reg_rule
[count
].bw_pwr_info
);
4100 reg_rule_ptr
[count
].flags
=
4101 FIELD_GET(REG_RULE_FLAGS
,
4102 wmi_reg_rule
[count
].flag_info
);
4105 return reg_rule_ptr
;
4108 static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base
*ab
,
4109 struct sk_buff
*skb
,
4110 struct cur_regulatory_info
*reg_info
)
4113 const struct wmi_reg_chan_list_cc_event
*chan_list_event_hdr
;
4114 struct wmi_regulatory_rule_struct
*wmi_reg_rule
;
4115 u32 num_2g_reg_rules
, num_5g_reg_rules
;
4118 ath11k_dbg(ab
, ATH11K_DBG_WMI
, "processing regulatory channel list\n");
4120 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
4123 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4127 chan_list_event_hdr
= tb
[WMI_TAG_REG_CHAN_LIST_CC_EVENT
];
4128 if (!chan_list_event_hdr
) {
4129 ath11k_warn(ab
, "failed to fetch reg chan list update ev\n");
4134 reg_info
->num_2g_reg_rules
= chan_list_event_hdr
->num_2g_reg_rules
;
4135 reg_info
->num_5g_reg_rules
= chan_list_event_hdr
->num_5g_reg_rules
;
4137 if (!(reg_info
->num_2g_reg_rules
+ reg_info
->num_5g_reg_rules
)) {
4138 ath11k_warn(ab
, "No regulatory rules available in the event info\n");
4143 memcpy(reg_info
->alpha2
, &chan_list_event_hdr
->alpha2
,
4145 reg_info
->dfs_region
= chan_list_event_hdr
->dfs_region
;
4146 reg_info
->phybitmap
= chan_list_event_hdr
->phybitmap
;
4147 reg_info
->num_phy
= chan_list_event_hdr
->num_phy
;
4148 reg_info
->phy_id
= chan_list_event_hdr
->phy_id
;
4149 reg_info
->ctry_code
= chan_list_event_hdr
->country_id
;
4150 reg_info
->reg_dmn_pair
= chan_list_event_hdr
->domain_code
;
4151 if (chan_list_event_hdr
->status_code
== WMI_REG_SET_CC_STATUS_PASS
)
4152 reg_info
->status_code
= REG_SET_CC_STATUS_PASS
;
4153 else if (chan_list_event_hdr
->status_code
== WMI_REG_CURRENT_ALPHA2_NOT_FOUND
)
4154 reg_info
->status_code
= REG_CURRENT_ALPHA2_NOT_FOUND
;
4155 else if (chan_list_event_hdr
->status_code
== WMI_REG_INIT_ALPHA2_NOT_FOUND
)
4156 reg_info
->status_code
= REG_INIT_ALPHA2_NOT_FOUND
;
4157 else if (chan_list_event_hdr
->status_code
== WMI_REG_SET_CC_CHANGE_NOT_ALLOWED
)
4158 reg_info
->status_code
= REG_SET_CC_CHANGE_NOT_ALLOWED
;
4159 else if (chan_list_event_hdr
->status_code
== WMI_REG_SET_CC_STATUS_NO_MEMORY
)
4160 reg_info
->status_code
= REG_SET_CC_STATUS_NO_MEMORY
;
4161 else if (chan_list_event_hdr
->status_code
== WMI_REG_SET_CC_STATUS_FAIL
)
4162 reg_info
->status_code
= REG_SET_CC_STATUS_FAIL
;
4164 reg_info
->min_bw_2g
= chan_list_event_hdr
->min_bw_2g
;
4165 reg_info
->max_bw_2g
= chan_list_event_hdr
->max_bw_2g
;
4166 reg_info
->min_bw_5g
= chan_list_event_hdr
->min_bw_5g
;
4167 reg_info
->max_bw_5g
= chan_list_event_hdr
->max_bw_5g
;
4169 num_2g_reg_rules
= reg_info
->num_2g_reg_rules
;
4170 num_5g_reg_rules
= reg_info
->num_5g_reg_rules
;
4172 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
4173 "%s:cc %s dsf %d BW: min_2g %d max_2g %d min_5g %d max_5g %d",
4174 __func__
, reg_info
->alpha2
, reg_info
->dfs_region
,
4175 reg_info
->min_bw_2g
, reg_info
->max_bw_2g
,
4176 reg_info
->min_bw_5g
, reg_info
->max_bw_5g
);
4178 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
4179 "%s: num_2g_reg_rules %d num_5g_reg_rules %d", __func__
,
4180 num_2g_reg_rules
, num_5g_reg_rules
);
4183 (struct wmi_regulatory_rule_struct
*)((u8
*)chan_list_event_hdr
4184 + sizeof(*chan_list_event_hdr
)
4185 + sizeof(struct wmi_tlv
));
4187 if (num_2g_reg_rules
) {
4188 reg_info
->reg_rules_2g_ptr
= create_reg_rules_from_wmi(num_2g_reg_rules
,
4190 if (!reg_info
->reg_rules_2g_ptr
) {
4192 ath11k_warn(ab
, "Unable to Allocate memory for 2g rules\n");
4197 if (num_5g_reg_rules
) {
4198 wmi_reg_rule
+= num_2g_reg_rules
;
4199 reg_info
->reg_rules_5g_ptr
= create_reg_rules_from_wmi(num_5g_reg_rules
,
4201 if (!reg_info
->reg_rules_5g_ptr
) {
4203 ath11k_warn(ab
, "Unable to Allocate memory for 5g rules\n");
4208 ath11k_dbg(ab
, ATH11K_DBG_WMI
, "processed regulatory channel list\n");
4214 static int ath11k_pull_peer_del_resp_ev(struct ath11k_base
*ab
, struct sk_buff
*skb
,
4215 struct wmi_peer_delete_resp_event
*peer_del_resp
)
4218 const struct wmi_peer_delete_resp_event
*ev
;
4221 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
4224 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4228 ev
= tb
[WMI_TAG_PEER_DELETE_RESP_EVENT
];
4230 ath11k_warn(ab
, "failed to fetch peer delete resp ev");
4235 memset(peer_del_resp
, 0, sizeof(*peer_del_resp
));
4237 peer_del_resp
->vdev_id
= ev
->vdev_id
;
4238 ether_addr_copy(peer_del_resp
->peer_macaddr
.addr
,
4239 ev
->peer_macaddr
.addr
);
4245 static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base
*ab
, void *evt_buf
,
4246 u32 len
, u32
*vdev_id
,
4250 const struct wmi_bcn_tx_status_event
*ev
;
4253 tb
= ath11k_wmi_tlv_parse_alloc(ab
, evt_buf
, len
, GFP_ATOMIC
);
4256 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4260 ev
= tb
[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT
];
4262 ath11k_warn(ab
, "failed to fetch bcn tx status ev");
4267 *vdev_id
= ev
->vdev_id
;
4268 *tx_status
= ev
->tx_status
;
4274 static int ath11k_pull_vdev_stopped_param_tlv(struct ath11k_base
*ab
, struct sk_buff
*skb
,
4278 const struct wmi_vdev_stopped_event
*ev
;
4281 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
4284 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4288 ev
= tb
[WMI_TAG_VDEV_STOPPED_EVENT
];
4290 ath11k_warn(ab
, "failed to fetch vdev stop ev");
4295 *vdev_id
= ev
->vdev_id
;
4301 static int ath11k_pull_mgmt_rx_params_tlv(struct ath11k_base
*ab
,
4302 struct sk_buff
*skb
,
4303 struct mgmt_rx_event_params
*hdr
)
4306 const struct wmi_mgmt_rx_hdr
*ev
;
4310 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
4313 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4317 ev
= tb
[WMI_TAG_MGMT_RX_HDR
];
4318 frame
= tb
[WMI_TAG_ARRAY_BYTE
];
4320 if (!ev
|| !frame
) {
4321 ath11k_warn(ab
, "failed to fetch mgmt rx hdr");
4326 hdr
->pdev_id
= ev
->pdev_id
;
4327 hdr
->chan_freq
= ev
->chan_freq
;
4328 hdr
->channel
= ev
->channel
;
4330 hdr
->rate
= ev
->rate
;
4331 hdr
->phy_mode
= ev
->phy_mode
;
4332 hdr
->buf_len
= ev
->buf_len
;
4333 hdr
->status
= ev
->status
;
4334 hdr
->flags
= ev
->flags
;
4335 hdr
->rssi
= ev
->rssi
;
4336 hdr
->tsf_delta
= ev
->tsf_delta
;
4337 memcpy(hdr
->rssi_ctl
, ev
->rssi_ctl
, sizeof(hdr
->rssi_ctl
));
4339 if (skb
->len
< (frame
- skb
->data
) + hdr
->buf_len
) {
4340 ath11k_warn(ab
, "invalid length in mgmt rx hdr ev");
4345 /* shift the sk_buff to point to `frame` */
4347 skb_put(skb
, frame
- skb
->data
);
4348 skb_pull(skb
, frame
- skb
->data
);
4349 skb_put(skb
, hdr
->buf_len
);
4351 ath11k_ce_byte_swap(skb
->data
, hdr
->buf_len
);
4357 static int wmi_process_mgmt_tx_comp(struct ath11k
*ar
, u32 desc_id
,
4360 struct sk_buff
*msdu
;
4361 struct ieee80211_tx_info
*info
;
4362 struct ath11k_skb_cb
*skb_cb
;
4364 spin_lock_bh(&ar
->txmgmt_idr_lock
);
4365 msdu
= idr_find(&ar
->txmgmt_idr
, desc_id
);
4368 ath11k_warn(ar
->ab
, "received mgmt tx compl for invalid msdu_id: %d\n",
4370 spin_unlock_bh(&ar
->txmgmt_idr_lock
);
4374 idr_remove(&ar
->txmgmt_idr
, desc_id
);
4375 spin_unlock_bh(&ar
->txmgmt_idr_lock
);
4377 skb_cb
= ATH11K_SKB_CB(msdu
);
4378 dma_unmap_single(ar
->ab
->dev
, skb_cb
->paddr
, msdu
->len
, DMA_TO_DEVICE
);
4380 info
= IEEE80211_SKB_CB(msdu
);
4381 if ((!(info
->flags
& IEEE80211_TX_CTL_NO_ACK
)) && !status
)
4382 info
->flags
|= IEEE80211_TX_STAT_ACK
;
4384 ieee80211_tx_status_irqsafe(ar
->hw
, msdu
);
4386 /* WARN when we received this event without doing any mgmt tx */
4387 if (atomic_dec_if_positive(&ar
->num_pending_mgmt_tx
) < 0)
4393 static int ath11k_pull_mgmt_tx_compl_param_tlv(struct ath11k_base
*ab
,
4394 struct sk_buff
*skb
,
4395 struct wmi_mgmt_tx_compl_event
*param
)
4398 const struct wmi_mgmt_tx_compl_event
*ev
;
4401 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
4404 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4408 ev
= tb
[WMI_TAG_MGMT_TX_COMPL_EVENT
];
4410 ath11k_warn(ab
, "failed to fetch mgmt tx compl ev");
4415 param
->pdev_id
= ev
->pdev_id
;
4416 param
->desc_id
= ev
->desc_id
;
4417 param
->status
= ev
->status
;
4423 static void ath11k_wmi_event_scan_started(struct ath11k
*ar
)
4425 lockdep_assert_held(&ar
->data_lock
);
4427 switch (ar
->scan
.state
) {
4428 case ATH11K_SCAN_IDLE
:
4429 case ATH11K_SCAN_RUNNING
:
4430 case ATH11K_SCAN_ABORTING
:
4431 ath11k_warn(ar
->ab
, "received scan started event in an invalid scan state: %s (%d)\n",
4432 ath11k_scan_state_str(ar
->scan
.state
),
4435 case ATH11K_SCAN_STARTING
:
4436 ar
->scan
.state
= ATH11K_SCAN_RUNNING
;
4437 complete(&ar
->scan
.started
);
4442 static void ath11k_wmi_event_scan_start_failed(struct ath11k
*ar
)
4444 lockdep_assert_held(&ar
->data_lock
);
4446 switch (ar
->scan
.state
) {
4447 case ATH11K_SCAN_IDLE
:
4448 case ATH11K_SCAN_RUNNING
:
4449 case ATH11K_SCAN_ABORTING
:
4450 ath11k_warn(ar
->ab
, "received scan start failed event in an invalid scan state: %s (%d)\n",
4451 ath11k_scan_state_str(ar
->scan
.state
),
4454 case ATH11K_SCAN_STARTING
:
4455 complete(&ar
->scan
.started
);
4456 __ath11k_mac_scan_finish(ar
);
4461 static void ath11k_wmi_event_scan_completed(struct ath11k
*ar
)
4463 lockdep_assert_held(&ar
->data_lock
);
4465 switch (ar
->scan
.state
) {
4466 case ATH11K_SCAN_IDLE
:
4467 case ATH11K_SCAN_STARTING
:
4468 /* One suspected reason scan can be completed while starting is
4469 * if firmware fails to deliver all scan events to the host,
4470 * e.g. when transport pipe is full. This has been observed
4471 * with spectral scan phyerr events starving wmi transport
4472 * pipe. In such case the "scan completed" event should be (and
4473 * is) ignored by the host as it may be just firmware's scan
4474 * state machine recovering.
4476 ath11k_warn(ar
->ab
, "received scan completed event in an invalid scan state: %s (%d)\n",
4477 ath11k_scan_state_str(ar
->scan
.state
),
4480 case ATH11K_SCAN_RUNNING
:
4481 case ATH11K_SCAN_ABORTING
:
4482 __ath11k_mac_scan_finish(ar
);
4487 static void ath11k_wmi_event_scan_bss_chan(struct ath11k
*ar
)
4489 lockdep_assert_held(&ar
->data_lock
);
4491 switch (ar
->scan
.state
) {
4492 case ATH11K_SCAN_IDLE
:
4493 case ATH11K_SCAN_STARTING
:
4494 ath11k_warn(ar
->ab
, "received scan bss chan event in an invalid scan state: %s (%d)\n",
4495 ath11k_scan_state_str(ar
->scan
.state
),
4498 case ATH11K_SCAN_RUNNING
:
4499 case ATH11K_SCAN_ABORTING
:
4500 ar
->scan_channel
= NULL
;
4505 static void ath11k_wmi_event_scan_foreign_chan(struct ath11k
*ar
, u32 freq
)
4507 lockdep_assert_held(&ar
->data_lock
);
4509 switch (ar
->scan
.state
) {
4510 case ATH11K_SCAN_IDLE
:
4511 case ATH11K_SCAN_STARTING
:
4512 ath11k_warn(ar
->ab
, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
4513 ath11k_scan_state_str(ar
->scan
.state
),
4516 case ATH11K_SCAN_RUNNING
:
4517 case ATH11K_SCAN_ABORTING
:
4518 ar
->scan_channel
= ieee80211_get_channel(ar
->hw
->wiphy
, freq
);
4524 ath11k_wmi_event_scan_type_str(enum wmi_scan_event_type type
,
4525 enum wmi_scan_completion_reason reason
)
4528 case WMI_SCAN_EVENT_STARTED
:
4530 case WMI_SCAN_EVENT_COMPLETED
:
4532 case WMI_SCAN_REASON_COMPLETED
:
4534 case WMI_SCAN_REASON_CANCELLED
:
4535 return "completed [cancelled]";
4536 case WMI_SCAN_REASON_PREEMPTED
:
4537 return "completed [preempted]";
4538 case WMI_SCAN_REASON_TIMEDOUT
:
4539 return "completed [timedout]";
4540 case WMI_SCAN_REASON_INTERNAL_FAILURE
:
4541 return "completed [internal err]";
4542 case WMI_SCAN_REASON_MAX
:
4545 return "completed [unknown]";
4546 case WMI_SCAN_EVENT_BSS_CHANNEL
:
4547 return "bss channel";
4548 case WMI_SCAN_EVENT_FOREIGN_CHAN
:
4549 return "foreign channel";
4550 case WMI_SCAN_EVENT_DEQUEUED
:
4552 case WMI_SCAN_EVENT_PREEMPTED
:
4554 case WMI_SCAN_EVENT_START_FAILED
:
4555 return "start failed";
4556 case WMI_SCAN_EVENT_RESTARTED
:
4558 case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT
:
4559 return "foreign channel exit";
4565 static int ath11k_pull_scan_ev(struct ath11k_base
*ab
, struct sk_buff
*skb
,
4566 struct wmi_scan_event
*scan_evt_param
)
4569 const struct wmi_scan_event
*ev
;
4572 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
4575 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4579 ev
= tb
[WMI_TAG_SCAN_EVENT
];
4581 ath11k_warn(ab
, "failed to fetch scan ev");
4586 scan_evt_param
->event_type
= ev
->event_type
;
4587 scan_evt_param
->reason
= ev
->reason
;
4588 scan_evt_param
->channel_freq
= ev
->channel_freq
;
4589 scan_evt_param
->scan_req_id
= ev
->scan_req_id
;
4590 scan_evt_param
->scan_id
= ev
->scan_id
;
4591 scan_evt_param
->vdev_id
= ev
->vdev_id
;
4592 scan_evt_param
->tsf_timestamp
= ev
->tsf_timestamp
;
4598 static int ath11k_pull_peer_sta_kickout_ev(struct ath11k_base
*ab
, struct sk_buff
*skb
,
4599 struct wmi_peer_sta_kickout_arg
*arg
)
4602 const struct wmi_peer_sta_kickout_event
*ev
;
4605 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
4608 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4612 ev
= tb
[WMI_TAG_PEER_STA_KICKOUT_EVENT
];
4614 ath11k_warn(ab
, "failed to fetch peer sta kickout ev");
4619 arg
->mac_addr
= ev
->peer_macaddr
.addr
;
4625 static int ath11k_pull_roam_ev(struct ath11k_base
*ab
, struct sk_buff
*skb
,
4626 struct wmi_roam_event
*roam_ev
)
4629 const struct wmi_roam_event
*ev
;
4632 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
4635 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4639 ev
= tb
[WMI_TAG_ROAM_EVENT
];
4641 ath11k_warn(ab
, "failed to fetch roam ev");
4646 roam_ev
->vdev_id
= ev
->vdev_id
;
4647 roam_ev
->reason
= ev
->reason
;
4648 roam_ev
->rssi
= ev
->rssi
;
4654 static int freq_to_idx(struct ath11k
*ar
, int freq
)
4656 struct ieee80211_supported_band
*sband
;
4657 int band
, ch
, idx
= 0;
4659 for (band
= NL80211_BAND_2GHZ
; band
< NUM_NL80211_BANDS
; band
++) {
4660 sband
= ar
->hw
->wiphy
->bands
[band
];
4664 for (ch
= 0; ch
< sband
->n_channels
; ch
++, idx
++)
4665 if (sband
->channels
[ch
].center_freq
== freq
)
4673 static int ath11k_pull_chan_info_ev(struct ath11k_base
*ab
, u8
*evt_buf
,
4674 u32 len
, struct wmi_chan_info_event
*ch_info_ev
)
4677 const struct wmi_chan_info_event
*ev
;
4680 tb
= ath11k_wmi_tlv_parse_alloc(ab
, evt_buf
, len
, GFP_ATOMIC
);
4683 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4687 ev
= tb
[WMI_TAG_CHAN_INFO_EVENT
];
4689 ath11k_warn(ab
, "failed to fetch chan info ev");
4694 ch_info_ev
->err_code
= ev
->err_code
;
4695 ch_info_ev
->freq
= ev
->freq
;
4696 ch_info_ev
->cmd_flags
= ev
->cmd_flags
;
4697 ch_info_ev
->noise_floor
= ev
->noise_floor
;
4698 ch_info_ev
->rx_clear_count
= ev
->rx_clear_count
;
4699 ch_info_ev
->cycle_count
= ev
->cycle_count
;
4700 ch_info_ev
->chan_tx_pwr_range
= ev
->chan_tx_pwr_range
;
4701 ch_info_ev
->chan_tx_pwr_tp
= ev
->chan_tx_pwr_tp
;
4702 ch_info_ev
->rx_frame_count
= ev
->rx_frame_count
;
4703 ch_info_ev
->tx_frame_cnt
= ev
->tx_frame_cnt
;
4704 ch_info_ev
->mac_clk_mhz
= ev
->mac_clk_mhz
;
4705 ch_info_ev
->vdev_id
= ev
->vdev_id
;
4712 ath11k_pull_pdev_bss_chan_info_ev(struct ath11k_base
*ab
, struct sk_buff
*skb
,
4713 struct wmi_pdev_bss_chan_info_event
*bss_ch_info_ev
)
4716 const struct wmi_pdev_bss_chan_info_event
*ev
;
4719 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
4722 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4726 ev
= tb
[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT
];
4728 ath11k_warn(ab
, "failed to fetch pdev bss chan info ev");
4733 bss_ch_info_ev
->pdev_id
= ev
->pdev_id
;
4734 bss_ch_info_ev
->freq
= ev
->freq
;
4735 bss_ch_info_ev
->noise_floor
= ev
->noise_floor
;
4736 bss_ch_info_ev
->rx_clear_count_low
= ev
->rx_clear_count_low
;
4737 bss_ch_info_ev
->rx_clear_count_high
= ev
->rx_clear_count_high
;
4738 bss_ch_info_ev
->cycle_count_low
= ev
->cycle_count_low
;
4739 bss_ch_info_ev
->cycle_count_high
= ev
->cycle_count_high
;
4740 bss_ch_info_ev
->tx_cycle_count_low
= ev
->tx_cycle_count_low
;
4741 bss_ch_info_ev
->tx_cycle_count_high
= ev
->tx_cycle_count_high
;
4742 bss_ch_info_ev
->rx_cycle_count_low
= ev
->rx_cycle_count_low
;
4743 bss_ch_info_ev
->rx_cycle_count_high
= ev
->rx_cycle_count_high
;
4744 bss_ch_info_ev
->rx_bss_cycle_count_low
= ev
->rx_bss_cycle_count_low
;
4745 bss_ch_info_ev
->rx_bss_cycle_count_high
= ev
->rx_bss_cycle_count_high
;
4752 ath11k_pull_vdev_install_key_compl_ev(struct ath11k_base
*ab
, struct sk_buff
*skb
,
4753 struct wmi_vdev_install_key_complete_arg
*arg
)
4756 const struct wmi_vdev_install_key_compl_event
*ev
;
4759 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
4762 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4766 ev
= tb
[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT
];
4768 ath11k_warn(ab
, "failed to fetch vdev install key compl ev");
4773 arg
->vdev_id
= ev
->vdev_id
;
4774 arg
->macaddr
= ev
->peer_macaddr
.addr
;
4775 arg
->key_idx
= ev
->key_idx
;
4776 arg
->key_flags
= ev
->key_flags
;
4777 arg
->status
= ev
->status
;
4783 static int ath11k_pull_peer_assoc_conf_ev(struct ath11k_base
*ab
, struct sk_buff
*skb
,
4784 struct wmi_peer_assoc_conf_arg
*peer_assoc_conf
)
4787 const struct wmi_peer_assoc_conf_event
*ev
;
4790 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
4793 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4797 ev
= tb
[WMI_TAG_PEER_ASSOC_CONF_EVENT
];
4799 ath11k_warn(ab
, "failed to fetch peer assoc conf ev");
4804 peer_assoc_conf
->vdev_id
= ev
->vdev_id
;
4805 peer_assoc_conf
->macaddr
= ev
->peer_macaddr
.addr
;
4811 static void ath11k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base
*src
,
4812 struct ath11k_fw_stats_pdev
*dst
)
4814 dst
->ch_noise_floor
= src
->chan_nf
;
4815 dst
->tx_frame_count
= src
->tx_frame_count
;
4816 dst
->rx_frame_count
= src
->rx_frame_count
;
4817 dst
->rx_clear_count
= src
->rx_clear_count
;
4818 dst
->cycle_count
= src
->cycle_count
;
4819 dst
->phy_err_count
= src
->phy_err_count
;
4820 dst
->chan_tx_power
= src
->chan_tx_pwr
;
4824 ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx
*src
,
4825 struct ath11k_fw_stats_pdev
*dst
)
4827 dst
->comp_queued
= src
->comp_queued
;
4828 dst
->comp_delivered
= src
->comp_delivered
;
4829 dst
->msdu_enqued
= src
->msdu_enqued
;
4830 dst
->mpdu_enqued
= src
->mpdu_enqued
;
4831 dst
->wmm_drop
= src
->wmm_drop
;
4832 dst
->local_enqued
= src
->local_enqued
;
4833 dst
->local_freed
= src
->local_freed
;
4834 dst
->hw_queued
= src
->hw_queued
;
4835 dst
->hw_reaped
= src
->hw_reaped
;
4836 dst
->underrun
= src
->underrun
;
4837 dst
->tx_abort
= src
->tx_abort
;
4838 dst
->mpdus_requed
= src
->mpdus_requed
;
4839 dst
->tx_ko
= src
->tx_ko
;
4840 dst
->data_rc
= src
->data_rc
;
4841 dst
->self_triggers
= src
->self_triggers
;
4842 dst
->sw_retry_failure
= src
->sw_retry_failure
;
4843 dst
->illgl_rate_phy_err
= src
->illgl_rate_phy_err
;
4844 dst
->pdev_cont_xretry
= src
->pdev_cont_xretry
;
4845 dst
->pdev_tx_timeout
= src
->pdev_tx_timeout
;
4846 dst
->pdev_resets
= src
->pdev_resets
;
4847 dst
->stateless_tid_alloc_failure
= src
->stateless_tid_alloc_failure
;
4848 dst
->phy_underrun
= src
->phy_underrun
;
4849 dst
->txop_ovf
= src
->txop_ovf
;
4852 static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx
*src
,
4853 struct ath11k_fw_stats_pdev
*dst
)
4855 dst
->mid_ppdu_route_change
= src
->mid_ppdu_route_change
;
4856 dst
->status_rcvd
= src
->status_rcvd
;
4857 dst
->r0_frags
= src
->r0_frags
;
4858 dst
->r1_frags
= src
->r1_frags
;
4859 dst
->r2_frags
= src
->r2_frags
;
4860 dst
->r3_frags
= src
->r3_frags
;
4861 dst
->htt_msdus
= src
->htt_msdus
;
4862 dst
->htt_mpdus
= src
->htt_mpdus
;
4863 dst
->loc_msdus
= src
->loc_msdus
;
4864 dst
->loc_mpdus
= src
->loc_mpdus
;
4865 dst
->oversize_amsdu
= src
->oversize_amsdu
;
4866 dst
->phy_errs
= src
->phy_errs
;
4867 dst
->phy_err_drop
= src
->phy_err_drop
;
4868 dst
->mpdu_errs
= src
->mpdu_errs
;
4872 ath11k_wmi_pull_vdev_stats(const struct wmi_vdev_stats
*src
,
4873 struct ath11k_fw_stats_vdev
*dst
)
4877 dst
->vdev_id
= src
->vdev_id
;
4878 dst
->beacon_snr
= src
->beacon_snr
;
4879 dst
->data_snr
= src
->data_snr
;
4880 dst
->num_rx_frames
= src
->num_rx_frames
;
4881 dst
->num_rts_fail
= src
->num_rts_fail
;
4882 dst
->num_rts_success
= src
->num_rts_success
;
4883 dst
->num_rx_err
= src
->num_rx_err
;
4884 dst
->num_rx_discard
= src
->num_rx_discard
;
4885 dst
->num_tx_not_acked
= src
->num_tx_not_acked
;
4887 for (i
= 0; i
< ARRAY_SIZE(src
->num_tx_frames
); i
++)
4888 dst
->num_tx_frames
[i
] = src
->num_tx_frames
[i
];
4890 for (i
= 0; i
< ARRAY_SIZE(src
->num_tx_frames_retries
); i
++)
4891 dst
->num_tx_frames_retries
[i
] = src
->num_tx_frames_retries
[i
];
4893 for (i
= 0; i
< ARRAY_SIZE(src
->num_tx_frames_failures
); i
++)
4894 dst
->num_tx_frames_failures
[i
] = src
->num_tx_frames_failures
[i
];
4896 for (i
= 0; i
< ARRAY_SIZE(src
->tx_rate_history
); i
++)
4897 dst
->tx_rate_history
[i
] = src
->tx_rate_history
[i
];
4899 for (i
= 0; i
< ARRAY_SIZE(src
->beacon_rssi_history
); i
++)
4900 dst
->beacon_rssi_history
[i
] = src
->beacon_rssi_history
[i
];
4904 ath11k_wmi_pull_bcn_stats(const struct wmi_bcn_stats
*src
,
4905 struct ath11k_fw_stats_bcn
*dst
)
4907 dst
->vdev_id
= src
->vdev_id
;
4908 dst
->tx_bcn_succ_cnt
= src
->tx_bcn_succ_cnt
;
4909 dst
->tx_bcn_outage_cnt
= src
->tx_bcn_outage_cnt
;
4912 int ath11k_wmi_pull_fw_stats(struct ath11k_base
*ab
, struct sk_buff
*skb
,
4913 struct ath11k_fw_stats
*stats
)
4916 const struct wmi_stats_event
*ev
;
4921 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, len
, GFP_ATOMIC
);
4924 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4928 ev
= tb
[WMI_TAG_STATS_EVENT
];
4929 data
= tb
[WMI_TAG_ARRAY_BYTE
];
4931 ath11k_warn(ab
, "failed to fetch update stats ev");
4936 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
4937 "wmi stats update ev pdev_id %d pdev %i vdev %i bcn %i\n",
4939 ev
->num_pdev_stats
, ev
->num_vdev_stats
,
4942 stats
->pdev_id
= ev
->pdev_id
;
4943 stats
->stats_id
= 0;
4945 for (i
= 0; i
< ev
->num_pdev_stats
; i
++) {
4946 const struct wmi_pdev_stats
*src
;
4947 struct ath11k_fw_stats_pdev
*dst
;
4950 if (len
< sizeof(*src
)) {
4955 stats
->stats_id
= WMI_REQUEST_PDEV_STAT
;
4957 data
+= sizeof(*src
);
4958 len
-= sizeof(*src
);
4960 dst
= kzalloc(sizeof(*dst
), GFP_ATOMIC
);
4964 ath11k_wmi_pull_pdev_stats_base(&src
->base
, dst
);
4965 ath11k_wmi_pull_pdev_stats_tx(&src
->tx
, dst
);
4966 ath11k_wmi_pull_pdev_stats_rx(&src
->rx
, dst
);
4967 list_add_tail(&dst
->list
, &stats
->pdevs
);
4970 for (i
= 0; i
< ev
->num_vdev_stats
; i
++) {
4971 const struct wmi_vdev_stats
*src
;
4972 struct ath11k_fw_stats_vdev
*dst
;
4975 if (len
< sizeof(*src
)) {
4980 stats
->stats_id
= WMI_REQUEST_VDEV_STAT
;
4982 data
+= sizeof(*src
);
4983 len
-= sizeof(*src
);
4985 dst
= kzalloc(sizeof(*dst
), GFP_ATOMIC
);
4989 ath11k_wmi_pull_vdev_stats(src
, dst
);
4990 list_add_tail(&dst
->list
, &stats
->vdevs
);
4993 for (i
= 0; i
< ev
->num_bcn_stats
; i
++) {
4994 const struct wmi_bcn_stats
*src
;
4995 struct ath11k_fw_stats_bcn
*dst
;
4998 if (len
< sizeof(*src
)) {
5003 stats
->stats_id
= WMI_REQUEST_BCN_STAT
;
5005 data
+= sizeof(*src
);
5006 len
-= sizeof(*src
);
5008 dst
= kzalloc(sizeof(*dst
), GFP_ATOMIC
);
5012 ath11k_wmi_pull_bcn_stats(src
, dst
);
5013 list_add_tail(&dst
->list
, &stats
->bcn
);
5021 ath11k_pull_pdev_temp_ev(struct ath11k_base
*ab
, u8
*evt_buf
,
5022 u32 len
, const struct wmi_pdev_temperature_event
*ev
)
5027 tb
= ath11k_wmi_tlv_parse_alloc(ab
, evt_buf
, len
, GFP_ATOMIC
);
5030 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
5034 ev
= tb
[WMI_TAG_PDEV_TEMPERATURE_EVENT
];
5036 ath11k_warn(ab
, "failed to fetch pdev temp ev");
5045 size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head
*head
)
5047 struct ath11k_fw_stats_vdev
*i
;
5050 list_for_each_entry(i
, head
, list
)
5056 static size_t ath11k_wmi_fw_stats_num_bcn(struct list_head
*head
)
5058 struct ath11k_fw_stats_bcn
*i
;
5061 list_for_each_entry(i
, head
, list
)
5068 ath11k_wmi_fw_pdev_base_stats_fill(const struct ath11k_fw_stats_pdev
*pdev
,
5069 char *buf
, u32
*length
)
5072 u32 buf_len
= ATH11K_FW_STATS_BUF_SIZE
;
5074 len
+= scnprintf(buf
+ len
, buf_len
- len
, "\n");
5075 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s\n",
5076 "ath11k PDEV stats");
5077 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s\n\n",
5078 "=================");
5080 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5081 "Channel noise floor", pdev
->ch_noise_floor
);
5082 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
5083 "Channel TX power", pdev
->chan_tx_power
);
5084 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
5085 "TX frame count", pdev
->tx_frame_count
);
5086 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
5087 "RX frame count", pdev
->rx_frame_count
);
5088 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
5089 "RX clear count", pdev
->rx_clear_count
);
5090 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
5091 "Cycle count", pdev
->cycle_count
);
5092 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
5093 "PHY error count", pdev
->phy_err_count
);
5099 ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev
*pdev
,
5100 char *buf
, u32
*length
)
5103 u32 buf_len
= ATH11K_FW_STATS_BUF_SIZE
;
5105 len
+= scnprintf(buf
+ len
, buf_len
- len
, "\n%30s\n",
5106 "ath11k PDEV TX stats");
5107 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s\n\n",
5108 "====================");
5110 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5111 "HTT cookies queued", pdev
->comp_queued
);
5112 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5113 "HTT cookies disp.", pdev
->comp_delivered
);
5114 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5115 "MSDU queued", pdev
->msdu_enqued
);
5116 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5117 "MPDU queued", pdev
->mpdu_enqued
);
5118 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5119 "MSDUs dropped", pdev
->wmm_drop
);
5120 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5121 "Local enqued", pdev
->local_enqued
);
5122 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5123 "Local freed", pdev
->local_freed
);
5124 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5125 "HW queued", pdev
->hw_queued
);
5126 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5127 "PPDUs reaped", pdev
->hw_reaped
);
5128 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5129 "Num underruns", pdev
->underrun
);
5130 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5131 "PPDUs cleaned", pdev
->tx_abort
);
5132 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5133 "MPDUs requed", pdev
->mpdus_requed
);
5134 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
5135 "Excessive retries", pdev
->tx_ko
);
5136 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
5137 "HW rate", pdev
->data_rc
);
5138 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
5139 "Sched self triggers", pdev
->self_triggers
);
5140 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
5141 "Dropped due to SW retries",
5142 pdev
->sw_retry_failure
);
5143 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
5144 "Illegal rate phy errors",
5145 pdev
->illgl_rate_phy_err
);
5146 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
5147 "PDEV continuous xretry", pdev
->pdev_cont_xretry
);
5148 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
5149 "TX timeout", pdev
->pdev_tx_timeout
);
5150 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
5151 "PDEV resets", pdev
->pdev_resets
);
5152 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
5153 "Stateless TIDs alloc failures",
5154 pdev
->stateless_tid_alloc_failure
);
5155 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
5156 "PHY underrun", pdev
->phy_underrun
);
5157 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
5158 "MPDU is more than txop limit", pdev
->txop_ovf
);
5163 ath11k_wmi_fw_pdev_rx_stats_fill(const struct ath11k_fw_stats_pdev
*pdev
,
5164 char *buf
, u32
*length
)
5167 u32 buf_len
= ATH11K_FW_STATS_BUF_SIZE
;
5169 len
+= scnprintf(buf
+ len
, buf_len
- len
, "\n%30s\n",
5170 "ath11k PDEV RX stats");
5171 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s\n\n",
5172 "====================");
5174 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5175 "Mid PPDU route change",
5176 pdev
->mid_ppdu_route_change
);
5177 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5178 "Tot. number of statuses", pdev
->status_rcvd
);
5179 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5180 "Extra frags on rings 0", pdev
->r0_frags
);
5181 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5182 "Extra frags on rings 1", pdev
->r1_frags
);
5183 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5184 "Extra frags on rings 2", pdev
->r2_frags
);
5185 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5186 "Extra frags on rings 3", pdev
->r3_frags
);
5187 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5188 "MSDUs delivered to HTT", pdev
->htt_msdus
);
5189 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5190 "MPDUs delivered to HTT", pdev
->htt_mpdus
);
5191 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5192 "MSDUs delivered to stack", pdev
->loc_msdus
);
5193 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5194 "MPDUs delivered to stack", pdev
->loc_mpdus
);
5195 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5196 "Oversized AMSUs", pdev
->oversize_amsdu
);
5197 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5198 "PHY errors", pdev
->phy_errs
);
5199 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5200 "PHY errors drops", pdev
->phy_err_drop
);
5201 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
5202 "MPDU errors (FCS, MIC, ENC)", pdev
->mpdu_errs
);
5207 ath11k_wmi_fw_vdev_stats_fill(struct ath11k
*ar
,
5208 const struct ath11k_fw_stats_vdev
*vdev
,
5209 char *buf
, u32
*length
)
5212 u32 buf_len
= ATH11K_FW_STATS_BUF_SIZE
;
5213 struct ath11k_vif
*arvif
= ath11k_mac_get_arvif(ar
, vdev
->vdev_id
);
5217 /* VDEV stats has all the active VDEVs of other PDEVs as well,
5218 * ignoring those not part of requested PDEV
5223 vif_macaddr
= arvif
->vif
->addr
;
5225 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
5226 "VDEV ID", vdev
->vdev_id
);
5227 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %pM\n",
5228 "VDEV MAC address", vif_macaddr
);
5229 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
5230 "beacon snr", vdev
->beacon_snr
);
5231 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
5232 "data snr", vdev
->data_snr
);
5233 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
5234 "num rx frames", vdev
->num_rx_frames
);
5235 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
5236 "num rts fail", vdev
->num_rts_fail
);
5237 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
5238 "num rts success", vdev
->num_rts_success
);
5239 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
5240 "num rx err", vdev
->num_rx_err
);
5241 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
5242 "num rx discard", vdev
->num_rx_discard
);
5243 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
5244 "num tx not acked", vdev
->num_tx_not_acked
);
5246 for (i
= 0 ; i
< ARRAY_SIZE(vdev
->num_tx_frames
); i
++)
5247 len
+= scnprintf(buf
+ len
, buf_len
- len
,
5250 vdev
->num_tx_frames
[i
]);
5252 for (i
= 0 ; i
< ARRAY_SIZE(vdev
->num_tx_frames_retries
); i
++)
5253 len
+= scnprintf(buf
+ len
, buf_len
- len
,
5255 "num tx frames retries", i
,
5256 vdev
->num_tx_frames_retries
[i
]);
5258 for (i
= 0 ; i
< ARRAY_SIZE(vdev
->num_tx_frames_failures
); i
++)
5259 len
+= scnprintf(buf
+ len
, buf_len
- len
,
5261 "num tx frames failures", i
,
5262 vdev
->num_tx_frames_failures
[i
]);
5264 for (i
= 0 ; i
< ARRAY_SIZE(vdev
->tx_rate_history
); i
++)
5265 len
+= scnprintf(buf
+ len
, buf_len
- len
,
5266 "%25s [%02d] 0x%08x\n",
5267 "tx rate history", i
,
5268 vdev
->tx_rate_history
[i
]);
5270 for (i
= 0 ; i
< ARRAY_SIZE(vdev
->beacon_rssi_history
); i
++)
5271 len
+= scnprintf(buf
+ len
, buf_len
- len
,
5273 "beacon rssi history", i
,
5274 vdev
->beacon_rssi_history
[i
]);
5276 len
+= scnprintf(buf
+ len
, buf_len
- len
, "\n");
5281 ath11k_wmi_fw_bcn_stats_fill(struct ath11k
*ar
,
5282 const struct ath11k_fw_stats_bcn
*bcn
,
5283 char *buf
, u32
*length
)
5286 u32 buf_len
= ATH11K_FW_STATS_BUF_SIZE
;
5287 struct ath11k_vif
*arvif
= ath11k_mac_get_arvif(ar
, bcn
->vdev_id
);
5291 ath11k_warn(ar
->ab
, "invalid vdev id %d in bcn stats",
5296 vdev_macaddr
= arvif
->vif
->addr
;
5298 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
5299 "VDEV ID", bcn
->vdev_id
);
5300 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %pM\n",
5301 "VDEV MAC address", vdev_macaddr
);
5302 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s\n\n",
5303 "================");
5304 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
5305 "Num of beacon tx success", bcn
->tx_bcn_succ_cnt
);
5306 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
5307 "Num of beacon tx failures", bcn
->tx_bcn_outage_cnt
);
5309 len
+= scnprintf(buf
+ len
, buf_len
- len
, "\n");
5313 void ath11k_wmi_fw_stats_fill(struct ath11k
*ar
,
5314 struct ath11k_fw_stats
*fw_stats
,
5315 u32 stats_id
, char *buf
)
5318 u32 buf_len
= ATH11K_FW_STATS_BUF_SIZE
;
5319 const struct ath11k_fw_stats_pdev
*pdev
;
5320 const struct ath11k_fw_stats_vdev
*vdev
;
5321 const struct ath11k_fw_stats_bcn
*bcn
;
5324 spin_lock_bh(&ar
->data_lock
);
5326 if (stats_id
== WMI_REQUEST_PDEV_STAT
) {
5327 pdev
= list_first_entry_or_null(&fw_stats
->pdevs
,
5328 struct ath11k_fw_stats_pdev
, list
);
5330 ath11k_warn(ar
->ab
, "failed to get pdev stats\n");
5334 ath11k_wmi_fw_pdev_base_stats_fill(pdev
, buf
, &len
);
5335 ath11k_wmi_fw_pdev_tx_stats_fill(pdev
, buf
, &len
);
5336 ath11k_wmi_fw_pdev_rx_stats_fill(pdev
, buf
, &len
);
5339 if (stats_id
== WMI_REQUEST_VDEV_STAT
) {
5340 len
+= scnprintf(buf
+ len
, buf_len
- len
, "\n");
5341 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s\n",
5342 "ath11k VDEV stats");
5343 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s\n\n",
5344 "=================");
5346 list_for_each_entry(vdev
, &fw_stats
->vdevs
, list
)
5347 ath11k_wmi_fw_vdev_stats_fill(ar
, vdev
, buf
, &len
);
5350 if (stats_id
== WMI_REQUEST_BCN_STAT
) {
5351 num_bcn
= ath11k_wmi_fw_stats_num_bcn(&fw_stats
->bcn
);
5353 len
+= scnprintf(buf
+ len
, buf_len
- len
, "\n");
5354 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s (%zu)\n",
5355 "ath11k Beacon stats", num_bcn
);
5356 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s\n\n",
5357 "===================");
5359 list_for_each_entry(bcn
, &fw_stats
->bcn
, list
)
5360 ath11k_wmi_fw_bcn_stats_fill(ar
, bcn
, buf
, &len
);
5364 spin_unlock_bh(&ar
->data_lock
);
5372 static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base
*ab
)
5374 /* try to send pending beacons first. they take priority */
5375 wake_up(&ab
->wmi_ab
.tx_credits_wq
);
/* HTC callback: a WMI command skb has been consumed; release it. */
static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab,
				       struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}
/* Return true if the two-character country code is the world-regdomain
 * placeholder "00".
 */
static bool ath11k_reg_is_world_alpha(char *alpha)
{
	return alpha[0] == '0' && alpha[1] == '0';
}
5389 static int ath11k_reg_chan_list_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5391 struct cur_regulatory_info
*reg_info
= NULL
;
5392 struct ieee80211_regdomain
*regd
= NULL
;
5393 bool intersect
= false;
5394 int ret
= 0, pdev_idx
;
5397 reg_info
= kzalloc(sizeof(*reg_info
), GFP_ATOMIC
);
5403 ret
= ath11k_pull_reg_chan_list_update_ev(ab
, skb
, reg_info
);
5405 ath11k_warn(ab
, "failed to extract regulatory info from received event\n");
5409 if (reg_info
->status_code
!= REG_SET_CC_STATUS_PASS
) {
5410 /* In case of failure to set the requested ctry,
5411 * fw retains the current regd. We print a failure info
5412 * and return from here.
5414 ath11k_warn(ab
, "Failed to set the requested Country regulatory setting\n");
5418 pdev_idx
= reg_info
->phy_id
;
5420 if (pdev_idx
>= ab
->num_radios
) {
5421 /* Process the event for phy0 only if single_pdev_only
5422 * is true. If pdev_idx is valid but not 0, discard the
5423 * event. Otherwise, it goes to fallback.
5425 if (ab
->hw_params
.single_pdev_only
&&
5426 pdev_idx
< ab
->hw_params
.num_rxmda_per_pdev
)
5432 /* Avoid multiple overwrites to default regd, during core
5433 * stop-start after mac registration.
5435 if (ab
->default_regd
[pdev_idx
] && !ab
->new_regd
[pdev_idx
] &&
5436 !memcmp((char *)ab
->default_regd
[pdev_idx
]->alpha2
,
5437 (char *)reg_info
->alpha2
, 2))
5440 /* Intersect new rules with default regd if a new country setting was
5441 * requested, i.e a default regd was already set during initialization
5442 * and the regd coming from this event has a valid country info.
5444 if (ab
->default_regd
[pdev_idx
] &&
5445 !ath11k_reg_is_world_alpha((char *)
5446 ab
->default_regd
[pdev_idx
]->alpha2
) &&
5447 !ath11k_reg_is_world_alpha((char *)reg_info
->alpha2
))
5450 regd
= ath11k_reg_build_regd(ab
, reg_info
, intersect
);
5452 ath11k_warn(ab
, "failed to build regd from reg_info\n");
5456 spin_lock(&ab
->base_lock
);
5457 if (test_bit(ATH11K_FLAG_REGISTERED
, &ab
->dev_flags
)) {
5458 /* Once mac is registered, ar is valid and all CC events from
5459 * fw is considered to be received due to user requests
5461 * Free previously built regd before assigning the newly
5462 * generated regd to ar. NULL pointer handling will be
5463 * taken care by kfree itself.
5465 ar
= ab
->pdevs
[pdev_idx
].ar
;
5466 kfree(ab
->new_regd
[pdev_idx
]);
5467 ab
->new_regd
[pdev_idx
] = regd
;
5468 ieee80211_queue_work(ar
->hw
, &ar
->regd_update_work
);
5470 /* Multiple events for the same *ar is not expected. But we
5471 * can still clear any previously stored default_regd if we
5472 * are receiving this event for the same radio by mistake.
5473 * NULL pointer handling will be taken care by kfree itself.
5475 kfree(ab
->default_regd
[pdev_idx
]);
5476 /* This regd would be applied during mac registration */
5477 ab
->default_regd
[pdev_idx
] = regd
;
5479 ab
->dfs_region
= reg_info
->dfs_region
;
5480 spin_unlock(&ab
->base_lock
);
5485 /* Fallback to older reg (by sending previous country setting
5486 * again if fw has succeded and we failed to process here.
5487 * The Regdomain should be uniform across driver and fw. Since the
5488 * FW has processed the command and sent a success status, we expect
5489 * this function to succeed as well. If it doesn't, CTRY needs to be
5490 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
5492 /* TODO: This is rare, but still should also be handled */
5496 kfree(reg_info
->reg_rules_2g_ptr
);
5497 kfree(reg_info
->reg_rules_5g_ptr
);
5503 static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base
*ab
, u16 tag
, u16 len
,
5504 const void *ptr
, void *data
)
5506 struct wmi_tlv_rdy_parse
*rdy_parse
= data
;
5507 struct wmi_ready_event fixed_param
;
5508 struct wmi_mac_addr
*addr_list
;
5509 struct ath11k_pdev
*pdev
;
5514 case WMI_TAG_READY_EVENT
:
5515 memset(&fixed_param
, 0, sizeof(fixed_param
));
5516 memcpy(&fixed_param
, (struct wmi_ready_event
*)ptr
,
5517 min_t(u16
, sizeof(fixed_param
), len
));
5518 ab
->wlan_init_status
= fixed_param
.ready_event_min
.status
;
5519 rdy_parse
->num_extra_mac_addr
=
5520 fixed_param
.ready_event_min
.num_extra_mac_addr
;
5522 ether_addr_copy(ab
->mac_addr
,
5523 fixed_param
.ready_event_min
.mac_addr
.addr
);
5524 ab
->pktlog_defs_checksum
= fixed_param
.pktlog_defs_checksum
;
5525 ab
->wmi_ready
= true;
5527 case WMI_TAG_ARRAY_FIXED_STRUCT
:
5528 addr_list
= (struct wmi_mac_addr
*)ptr
;
5529 num_mac_addr
= rdy_parse
->num_extra_mac_addr
;
5531 if (!(ab
->num_radios
> 1 && num_mac_addr
>= ab
->num_radios
))
5534 for (i
= 0; i
< ab
->num_radios
; i
++) {
5535 pdev
= &ab
->pdevs
[i
];
5536 ether_addr_copy(pdev
->mac_addr
, addr_list
[i
].addr
);
5538 ab
->pdevs_macaddr_valid
= true;
5547 static int ath11k_ready_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5549 struct wmi_tlv_rdy_parse rdy_parse
= { };
5552 ret
= ath11k_wmi_tlv_iter(ab
, skb
->data
, skb
->len
,
5553 ath11k_wmi_tlv_rdy_parse
, &rdy_parse
);
5555 ath11k_warn(ab
, "failed to parse tlv %d\n", ret
);
5559 complete(&ab
->wmi_ab
.unified_ready
);
5563 static void ath11k_peer_delete_resp_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5565 struct wmi_peer_delete_resp_event peer_del_resp
;
5567 if (ath11k_pull_peer_del_resp_ev(ab
, skb
, &peer_del_resp
) != 0) {
5568 ath11k_warn(ab
, "failed to extract peer delete resp");
5572 /* TODO: Do we need to validate whether ath11k_peer_find() return NULL
5573 * Why this is needed when there is HTT event for peer delete
5577 static inline const char *ath11k_wmi_vdev_resp_print(u32 vdev_resp_status
)
5579 switch (vdev_resp_status
) {
5580 case WMI_VDEV_START_RESPONSE_INVALID_VDEVID
:
5581 return "invalid vdev id";
5582 case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED
:
5583 return "not supported";
5584 case WMI_VDEV_START_RESPONSE_DFS_VIOLATION
:
5585 return "dfs violation";
5586 case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN
:
5587 return "invalid regdomain";
5593 static void ath11k_vdev_start_resp_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5595 struct wmi_vdev_start_resp_event vdev_start_resp
;
5599 if (ath11k_pull_vdev_start_resp_tlv(ab
, skb
, &vdev_start_resp
) != 0) {
5600 ath11k_warn(ab
, "failed to extract vdev start resp");
5605 ar
= ath11k_mac_get_ar_by_vdev_id(ab
, vdev_start_resp
.vdev_id
);
5607 ath11k_warn(ab
, "invalid vdev id in vdev start resp ev %d",
5608 vdev_start_resp
.vdev_id
);
5613 ar
->last_wmi_vdev_start_status
= 0;
5615 status
= vdev_start_resp
.status
;
5617 if (WARN_ON_ONCE(status
)) {
5618 ath11k_warn(ab
, "vdev start resp error status %d (%s)\n",
5619 status
, ath11k_wmi_vdev_resp_print(status
));
5620 ar
->last_wmi_vdev_start_status
= status
;
5623 complete(&ar
->vdev_setup_done
);
5627 ath11k_dbg(ab
, ATH11K_DBG_WMI
, "vdev start resp for vdev id %d",
5628 vdev_start_resp
.vdev_id
);
5631 static void ath11k_bcn_tx_status_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5633 u32 vdev_id
, tx_status
;
5635 if (ath11k_pull_bcn_tx_status_ev(ab
, skb
->data
, skb
->len
,
5636 &vdev_id
, &tx_status
) != 0) {
5637 ath11k_warn(ab
, "failed to extract bcn tx status");
5642 static void ath11k_vdev_stopped_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5647 if (ath11k_pull_vdev_stopped_param_tlv(ab
, skb
, &vdev_id
) != 0) {
5648 ath11k_warn(ab
, "failed to extract vdev stopped event");
5653 ar
= ath11k_mac_get_ar_vdev_stop_status(ab
, vdev_id
);
5655 ath11k_warn(ab
, "invalid vdev id in vdev stopped ev %d",
5661 complete(&ar
->vdev_setup_done
);
5665 ath11k_dbg(ab
, ATH11K_DBG_WMI
, "vdev stopped for vdev id %d", vdev_id
);
5668 static void ath11k_mgmt_rx_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5670 struct mgmt_rx_event_params rx_ev
= {0};
5672 struct ieee80211_rx_status
*status
= IEEE80211_SKB_RXCB(skb
);
5673 struct ieee80211_hdr
*hdr
;
5675 struct ieee80211_supported_band
*sband
;
5677 if (ath11k_pull_mgmt_rx_params_tlv(ab
, skb
, &rx_ev
) != 0) {
5678 ath11k_warn(ab
, "failed to extract mgmt rx event");
5683 memset(status
, 0, sizeof(*status
));
5685 ath11k_dbg(ab
, ATH11K_DBG_MGMT
, "mgmt rx event status %08x\n",
5689 ar
= ath11k_mac_get_ar_by_pdev_id(ab
, rx_ev
.pdev_id
);
5692 ath11k_warn(ab
, "invalid pdev_id %d in mgmt_rx_event\n",
5698 if ((test_bit(ATH11K_CAC_RUNNING
, &ar
->dev_flags
)) ||
5699 (rx_ev
.status
& (WMI_RX_STATUS_ERR_DECRYPT
|
5700 WMI_RX_STATUS_ERR_KEY_CACHE_MISS
| WMI_RX_STATUS_ERR_CRC
))) {
5705 if (rx_ev
.status
& WMI_RX_STATUS_ERR_MIC
)
5706 status
->flag
|= RX_FLAG_MMIC_ERROR
;
5708 if (rx_ev
.chan_freq
>= ATH11K_MIN_6G_FREQ
) {
5709 status
->band
= NL80211_BAND_6GHZ
;
5710 } else if (rx_ev
.channel
>= 1 && rx_ev
.channel
<= 14) {
5711 status
->band
= NL80211_BAND_2GHZ
;
5712 } else if (rx_ev
.channel
>= 36 && rx_ev
.channel
<= ATH11K_MAX_5G_CHAN
) {
5713 status
->band
= NL80211_BAND_5GHZ
;
5715 /* Shouldn't happen unless list of advertised channels to
5716 * mac80211 has been changed.
5723 if (rx_ev
.phy_mode
== MODE_11B
&&
5724 (status
->band
== NL80211_BAND_5GHZ
|| status
->band
== NL80211_BAND_6GHZ
))
5725 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
5726 "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status
->band
);
5728 sband
= &ar
->mac
.sbands
[status
->band
];
5730 status
->freq
= ieee80211_channel_to_frequency(rx_ev
.channel
,
5732 status
->signal
= rx_ev
.snr
+ ATH11K_DEFAULT_NOISE_FLOOR
;
5733 status
->rate_idx
= ath11k_mac_bitrate_to_idx(sband
, rx_ev
.rate
/ 100);
5735 hdr
= (struct ieee80211_hdr
*)skb
->data
;
5736 fc
= le16_to_cpu(hdr
->frame_control
);
5738 /* Firmware is guaranteed to report all essential management frames via
5739 * WMI while it can deliver some extra via HTT. Since there can be
5740 * duplicates split the reporting wrt monitor/sniffing.
5742 status
->flag
|= RX_FLAG_SKIP_MONITOR
;
5744 /* In case of PMF, FW delivers decrypted frames with Protected Bit set.
5745 * Don't clear that. Also, FW delivers broadcast management frames
5746 * (ex: group privacy action frames in mesh) as encrypted payload.
5748 if (ieee80211_has_protected(hdr
->frame_control
) &&
5749 !is_multicast_ether_addr(ieee80211_get_DA(hdr
))) {
5750 status
->flag
|= RX_FLAG_DECRYPTED
;
5752 if (!ieee80211_is_robust_mgmt_frame(skb
)) {
5753 status
->flag
|= RX_FLAG_IV_STRIPPED
|
5754 RX_FLAG_MMIC_STRIPPED
;
5755 hdr
->frame_control
= __cpu_to_le16(fc
&
5756 ~IEEE80211_FCTL_PROTECTED
);
5760 /* TODO: Pending handle beacon implementation
5761 *if (ieee80211_is_beacon(hdr->frame_control))
5762 * ath11k_mac_handle_beacon(ar, skb);
5765 ath11k_dbg(ab
, ATH11K_DBG_MGMT
,
5766 "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
5768 fc
& IEEE80211_FCTL_FTYPE
, fc
& IEEE80211_FCTL_STYPE
);
5770 ath11k_dbg(ab
, ATH11K_DBG_MGMT
,
5771 "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
5772 status
->freq
, status
->band
, status
->signal
,
5775 ieee80211_rx_ni(ar
->hw
, skb
);
5781 static void ath11k_mgmt_tx_compl_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5783 struct wmi_mgmt_tx_compl_event tx_compl_param
= {0};
5786 if (ath11k_pull_mgmt_tx_compl_param_tlv(ab
, skb
, &tx_compl_param
) != 0) {
5787 ath11k_warn(ab
, "failed to extract mgmt tx compl event");
5792 ar
= ath11k_mac_get_ar_by_pdev_id(ab
, tx_compl_param
.pdev_id
);
5794 ath11k_warn(ab
, "invalid pdev id %d in mgmt_tx_compl_event\n",
5795 tx_compl_param
.pdev_id
);
5799 wmi_process_mgmt_tx_comp(ar
, tx_compl_param
.desc_id
,
5800 tx_compl_param
.status
);
5802 ath11k_dbg(ab
, ATH11K_DBG_MGMT
,
5803 "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
5804 tx_compl_param
.pdev_id
, tx_compl_param
.desc_id
,
5805 tx_compl_param
.status
);
5811 static struct ath11k
*ath11k_get_ar_on_scan_abort(struct ath11k_base
*ab
,
5815 struct ath11k_pdev
*pdev
;
5818 for (i
= 0; i
< ab
->num_radios
; i
++) {
5819 pdev
= rcu_dereference(ab
->pdevs_active
[i
]);
5820 if (pdev
&& pdev
->ar
) {
5823 spin_lock_bh(&ar
->data_lock
);
5824 if (ar
->scan
.state
== ATH11K_SCAN_ABORTING
&&
5825 ar
->scan
.vdev_id
== vdev_id
) {
5826 spin_unlock_bh(&ar
->data_lock
);
5829 spin_unlock_bh(&ar
->data_lock
);
5835 static void ath11k_scan_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5838 struct wmi_scan_event scan_ev
= {0};
5840 if (ath11k_pull_scan_ev(ab
, skb
, &scan_ev
) != 0) {
5841 ath11k_warn(ab
, "failed to extract scan event");
5847 /* In case the scan was cancelled, ex. during interface teardown,
5848 * the interface will not be found in active interfaces.
5849 * Rather, in such scenarios, iterate over the active pdev's to
5850 * search 'ar' if the corresponding 'ar' scan is ABORTING and the
5851 * aborting scan's vdev id matches this event info.
5853 if (scan_ev
.event_type
== WMI_SCAN_EVENT_COMPLETED
&&
5854 scan_ev
.reason
== WMI_SCAN_REASON_CANCELLED
)
5855 ar
= ath11k_get_ar_on_scan_abort(ab
, scan_ev
.vdev_id
);
5857 ar
= ath11k_mac_get_ar_by_vdev_id(ab
, scan_ev
.vdev_id
);
5860 ath11k_warn(ab
, "Received scan event for unknown vdev");
5865 spin_lock_bh(&ar
->data_lock
);
5867 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
5868 "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
5869 ath11k_wmi_event_scan_type_str(scan_ev
.event_type
, scan_ev
.reason
),
5870 scan_ev
.event_type
, scan_ev
.reason
, scan_ev
.channel_freq
,
5871 scan_ev
.scan_req_id
, scan_ev
.scan_id
, scan_ev
.vdev_id
,
5872 ath11k_scan_state_str(ar
->scan
.state
), ar
->scan
.state
);
5874 switch (scan_ev
.event_type
) {
5875 case WMI_SCAN_EVENT_STARTED
:
5876 ath11k_wmi_event_scan_started(ar
);
5878 case WMI_SCAN_EVENT_COMPLETED
:
5879 ath11k_wmi_event_scan_completed(ar
);
5881 case WMI_SCAN_EVENT_BSS_CHANNEL
:
5882 ath11k_wmi_event_scan_bss_chan(ar
);
5884 case WMI_SCAN_EVENT_FOREIGN_CHAN
:
5885 ath11k_wmi_event_scan_foreign_chan(ar
, scan_ev
.channel_freq
);
5887 case WMI_SCAN_EVENT_START_FAILED
:
5888 ath11k_warn(ab
, "received scan start failure event\n");
5889 ath11k_wmi_event_scan_start_failed(ar
);
5891 case WMI_SCAN_EVENT_DEQUEUED
:
5892 case WMI_SCAN_EVENT_PREEMPTED
:
5893 case WMI_SCAN_EVENT_RESTARTED
:
5894 case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT
:
5899 spin_unlock_bh(&ar
->data_lock
);
5904 static void ath11k_peer_sta_kickout_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5906 struct wmi_peer_sta_kickout_arg arg
= {};
5907 struct ieee80211_sta
*sta
;
5908 struct ath11k_peer
*peer
;
5911 if (ath11k_pull_peer_sta_kickout_ev(ab
, skb
, &arg
) != 0) {
5912 ath11k_warn(ab
, "failed to extract peer sta kickout event");
5918 spin_lock_bh(&ab
->base_lock
);
5920 peer
= ath11k_peer_find_by_addr(ab
, arg
.mac_addr
);
5923 ath11k_warn(ab
, "peer not found %pM\n",
5928 ar
= ath11k_mac_get_ar_by_vdev_id(ab
, peer
->vdev_id
);
5930 ath11k_warn(ab
, "invalid vdev id in peer sta kickout ev %d",
5935 sta
= ieee80211_find_sta_by_ifaddr(ar
->hw
,
5936 arg
.mac_addr
, NULL
);
5938 ath11k_warn(ab
, "Spurious quick kickout for STA %pM\n",
5943 ath11k_dbg(ab
, ATH11K_DBG_WMI
, "peer sta kickout event %pM",
5946 ieee80211_report_low_ack(sta
, 10);
5949 spin_unlock_bh(&ab
->base_lock
);
5953 static void ath11k_roam_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5955 struct wmi_roam_event roam_ev
= {};
5958 if (ath11k_pull_roam_ev(ab
, skb
, &roam_ev
) != 0) {
5959 ath11k_warn(ab
, "failed to extract roam event");
5963 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
5964 "wmi roam event vdev %u reason 0x%08x rssi %d\n",
5965 roam_ev
.vdev_id
, roam_ev
.reason
, roam_ev
.rssi
);
5968 ar
= ath11k_mac_get_ar_by_vdev_id(ab
, roam_ev
.vdev_id
);
5970 ath11k_warn(ab
, "invalid vdev id in roam ev %d",
5976 if (roam_ev
.reason
>= WMI_ROAM_REASON_MAX
)
5977 ath11k_warn(ab
, "ignoring unknown roam event reason %d on vdev %i\n",
5978 roam_ev
.reason
, roam_ev
.vdev_id
);
5980 switch (roam_ev
.reason
) {
5981 case WMI_ROAM_REASON_BEACON_MISS
:
5982 /* TODO: Pending beacon miss and connection_loss_work
5984 * ath11k_mac_handle_beacon_miss(ar, vdev_id);
5987 case WMI_ROAM_REASON_BETTER_AP
:
5988 case WMI_ROAM_REASON_LOW_RSSI
:
5989 case WMI_ROAM_REASON_SUITABLE_AP_FOUND
:
5990 case WMI_ROAM_REASON_HO_FAILED
:
5991 ath11k_warn(ab
, "ignoring not implemented roam event reason %d on vdev %i\n",
5992 roam_ev
.reason
, roam_ev
.vdev_id
);
5999 static void ath11k_chan_info_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
6001 struct wmi_chan_info_event ch_info_ev
= {0};
6003 struct survey_info
*survey
;
6005 /* HW channel counters frequency value in hertz */
6006 u32 cc_freq_hz
= ab
->cc_freq_hz
;
6008 if (ath11k_pull_chan_info_ev(ab
, skb
->data
, skb
->len
, &ch_info_ev
) != 0) {
6009 ath11k_warn(ab
, "failed to extract chan info event");
6013 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
6014 "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
6015 ch_info_ev
.vdev_id
, ch_info_ev
.err_code
, ch_info_ev
.freq
,
6016 ch_info_ev
.cmd_flags
, ch_info_ev
.noise_floor
,
6017 ch_info_ev
.rx_clear_count
, ch_info_ev
.cycle_count
,
6018 ch_info_ev
.mac_clk_mhz
);
6020 if (ch_info_ev
.cmd_flags
== WMI_CHAN_INFO_END_RESP
) {
6021 ath11k_dbg(ab
, ATH11K_DBG_WMI
, "chan info report completed\n");
6026 ar
= ath11k_mac_get_ar_by_vdev_id(ab
, ch_info_ev
.vdev_id
);
6028 ath11k_warn(ab
, "invalid vdev id in chan info ev %d",
6029 ch_info_ev
.vdev_id
);
6033 spin_lock_bh(&ar
->data_lock
);
6035 switch (ar
->scan
.state
) {
6036 case ATH11K_SCAN_IDLE
:
6037 case ATH11K_SCAN_STARTING
:
6038 ath11k_warn(ab
, "received chan info event without a scan request, ignoring\n");
6040 case ATH11K_SCAN_RUNNING
:
6041 case ATH11K_SCAN_ABORTING
:
6045 idx
= freq_to_idx(ar
, ch_info_ev
.freq
);
6046 if (idx
>= ARRAY_SIZE(ar
->survey
)) {
6047 ath11k_warn(ab
, "chan info: invalid frequency %d (idx %d out of bounds)\n",
6048 ch_info_ev
.freq
, idx
);
6052 /* If FW provides MAC clock frequency in Mhz, overriding the initialized
6053 * HW channel counters frequency value
6055 if (ch_info_ev
.mac_clk_mhz
)
6056 cc_freq_hz
= (ch_info_ev
.mac_clk_mhz
* 1000);
6058 if (ch_info_ev
.cmd_flags
== WMI_CHAN_INFO_START_RESP
) {
6059 survey
= &ar
->survey
[idx
];
6060 memset(survey
, 0, sizeof(*survey
));
6061 survey
->noise
= ch_info_ev
.noise_floor
;
6062 survey
->filled
= SURVEY_INFO_NOISE_DBM
| SURVEY_INFO_TIME
|
6063 SURVEY_INFO_TIME_BUSY
;
6064 survey
->time
= div_u64(ch_info_ev
.cycle_count
, cc_freq_hz
);
6065 survey
->time_busy
= div_u64(ch_info_ev
.rx_clear_count
, cc_freq_hz
);
6068 spin_unlock_bh(&ar
->data_lock
);
6073 ath11k_pdev_bss_chan_info_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
6075 struct wmi_pdev_bss_chan_info_event bss_ch_info_ev
= {};
6076 struct survey_info
*survey
;
6078 u32 cc_freq_hz
= ab
->cc_freq_hz
;
6079 u64 busy
, total
, tx
, rx
, rx_bss
;
6082 if (ath11k_pull_pdev_bss_chan_info_ev(ab
, skb
, &bss_ch_info_ev
) != 0) {
6083 ath11k_warn(ab
, "failed to extract pdev bss chan info event");
6087 busy
= (u64
)(bss_ch_info_ev
.rx_clear_count_high
) << 32 |
6088 bss_ch_info_ev
.rx_clear_count_low
;
6090 total
= (u64
)(bss_ch_info_ev
.cycle_count_high
) << 32 |
6091 bss_ch_info_ev
.cycle_count_low
;
6093 tx
= (u64
)(bss_ch_info_ev
.tx_cycle_count_high
) << 32 |
6094 bss_ch_info_ev
.tx_cycle_count_low
;
6096 rx
= (u64
)(bss_ch_info_ev
.rx_cycle_count_high
) << 32 |
6097 bss_ch_info_ev
.rx_cycle_count_low
;
6099 rx_bss
= (u64
)(bss_ch_info_ev
.rx_bss_cycle_count_high
) << 32 |
6100 bss_ch_info_ev
.rx_bss_cycle_count_low
;
6102 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
6103 "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
6104 bss_ch_info_ev
.pdev_id
, bss_ch_info_ev
.freq
,
6105 bss_ch_info_ev
.noise_floor
, busy
, total
,
6109 ar
= ath11k_mac_get_ar_by_pdev_id(ab
, bss_ch_info_ev
.pdev_id
);
6112 ath11k_warn(ab
, "invalid pdev id %d in bss_chan_info event\n",
6113 bss_ch_info_ev
.pdev_id
);
6118 spin_lock_bh(&ar
->data_lock
);
6119 idx
= freq_to_idx(ar
, bss_ch_info_ev
.freq
);
6120 if (idx
>= ARRAY_SIZE(ar
->survey
)) {
6121 ath11k_warn(ab
, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
6122 bss_ch_info_ev
.freq
, idx
);
6126 survey
= &ar
->survey
[idx
];
6128 survey
->noise
= bss_ch_info_ev
.noise_floor
;
6129 survey
->time
= div_u64(total
, cc_freq_hz
);
6130 survey
->time_busy
= div_u64(busy
, cc_freq_hz
);
6131 survey
->time_rx
= div_u64(rx_bss
, cc_freq_hz
);
6132 survey
->time_tx
= div_u64(tx
, cc_freq_hz
);
6133 survey
->filled
|= (SURVEY_INFO_NOISE_DBM
|
6135 SURVEY_INFO_TIME_BUSY
|
6136 SURVEY_INFO_TIME_RX
|
6137 SURVEY_INFO_TIME_TX
);
6139 spin_unlock_bh(&ar
->data_lock
);
6140 complete(&ar
->bss_survey_done
);
6145 static void ath11k_vdev_install_key_compl_event(struct ath11k_base
*ab
,
6146 struct sk_buff
*skb
)
6148 struct wmi_vdev_install_key_complete_arg install_key_compl
= {0};
6151 if (ath11k_pull_vdev_install_key_compl_ev(ab
, skb
, &install_key_compl
) != 0) {
6152 ath11k_warn(ab
, "failed to extract install key compl event");
6156 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
6157 "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
6158 install_key_compl
.key_idx
, install_key_compl
.key_flags
,
6159 install_key_compl
.macaddr
, install_key_compl
.status
);
6162 ar
= ath11k_mac_get_ar_by_vdev_id(ab
, install_key_compl
.vdev_id
);
6164 ath11k_warn(ab
, "invalid vdev id in install key compl ev %d",
6165 install_key_compl
.vdev_id
);
6170 ar
->install_key_status
= 0;
6172 if (install_key_compl
.status
!= WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS
) {
6173 ath11k_warn(ab
, "install key failed for %pM status %d\n",
6174 install_key_compl
.macaddr
, install_key_compl
.status
);
6175 ar
->install_key_status
= install_key_compl
.status
;
6178 complete(&ar
->install_key_done
);
6182 static void ath11k_service_available_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
6185 const struct wmi_service_available_event
*ev
;
6189 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
6192 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
6196 ev
= tb
[WMI_TAG_SERVICE_AVAILABLE_EVENT
];
6198 ath11k_warn(ab
, "failed to fetch svc available ev");
6203 /* TODO: Use wmi_service_segment_offset information to get the service
6204 * especially when more services are advertised in multiple sevice
6207 for (i
= 0, j
= WMI_MAX_SERVICE
;
6208 i
< WMI_SERVICE_SEGMENT_BM_SIZE32
&& j
< WMI_MAX_EXT_SERVICE
;
6211 if (ev
->wmi_service_segment_bitmap
[i
] &
6212 BIT(j
% WMI_AVAIL_SERVICE_BITS_IN_SIZE32
))
6213 set_bit(j
, ab
->wmi_ab
.svc_map
);
6214 } while (++j
% WMI_AVAIL_SERVICE_BITS_IN_SIZE32
);
6217 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
6218 "wmi_ext_service_bitmap 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x",
6219 ev
->wmi_service_segment_bitmap
[0], ev
->wmi_service_segment_bitmap
[1],
6220 ev
->wmi_service_segment_bitmap
[2], ev
->wmi_service_segment_bitmap
[3]);
6225 static void ath11k_peer_assoc_conf_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
6227 struct wmi_peer_assoc_conf_arg peer_assoc_conf
= {0};
6230 if (ath11k_pull_peer_assoc_conf_ev(ab
, skb
, &peer_assoc_conf
) != 0) {
6231 ath11k_warn(ab
, "failed to extract peer assoc conf event");
6235 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
6236 "peer assoc conf ev vdev id %d macaddr %pM\n",
6237 peer_assoc_conf
.vdev_id
, peer_assoc_conf
.macaddr
);
6240 ar
= ath11k_mac_get_ar_by_vdev_id(ab
, peer_assoc_conf
.vdev_id
);
6243 ath11k_warn(ab
, "invalid vdev id in peer assoc conf ev %d",
6244 peer_assoc_conf
.vdev_id
);
6249 complete(&ar
->peer_assoc_done
);
/* Handle WMI_UPDATE_STATS_EVENTID: forward the raw event to the debugfs
 * fw-stats machinery for parsing and bookkeeping.
 */
static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	ath11k_debugfs_fw_stats_process(ab, skb);
}
6258 /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
6259 * is not part of BDF CTL(Conformance test limits) table entries.
6261 static void ath11k_pdev_ctl_failsafe_check_event(struct ath11k_base
*ab
,
6262 struct sk_buff
*skb
)
6265 const struct wmi_pdev_ctl_failsafe_chk_event
*ev
;
6268 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
6271 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
6275 ev
= tb
[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT
];
6277 ath11k_warn(ab
, "failed to fetch pdev ctl failsafe check ev");
6282 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
6283 "pdev ctl failsafe check ev status %d\n",
6284 ev
->ctl_failsafe_status
);
6286 /* If ctl_failsafe_status is set to 1 FW will max out the Transmit power
6287 * to 10 dBm else the CTL power entry in the BDF would be picked up.
6289 if (ev
->ctl_failsafe_status
!= 0)
6290 ath11k_warn(ab
, "pdev ctl failsafe failure status %d",
6291 ev
->ctl_failsafe_status
);
6297 ath11k_wmi_process_csa_switch_count_event(struct ath11k_base
*ab
,
6298 const struct wmi_pdev_csa_switch_ev
*ev
,
6299 const u32
*vdev_ids
)
6302 struct ath11k_vif
*arvif
;
6304 /* Finish CSA once the switch count becomes NULL */
6305 if (ev
->current_switch_count
)
6309 for (i
= 0; i
< ev
->num_vdevs
; i
++) {
6310 arvif
= ath11k_mac_get_arvif_by_vdev_id(ab
, vdev_ids
[i
]);
6313 ath11k_warn(ab
, "Recvd csa status for unknown vdev %d",
6318 if (arvif
->is_up
&& arvif
->vif
->csa_active
)
6319 ieee80211_csa_finish(arvif
->vif
);
6325 ath11k_wmi_pdev_csa_switch_count_status_event(struct ath11k_base
*ab
,
6326 struct sk_buff
*skb
)
6329 const struct wmi_pdev_csa_switch_ev
*ev
;
6330 const u32
*vdev_ids
;
6333 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
6336 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
6340 ev
= tb
[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT
];
6341 vdev_ids
= tb
[WMI_TAG_ARRAY_UINT32
];
6343 if (!ev
|| !vdev_ids
) {
6344 ath11k_warn(ab
, "failed to fetch pdev csa switch count ev");
6349 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
6350 "pdev csa switch count %d for pdev %d, num_vdevs %d",
6351 ev
->current_switch_count
, ev
->pdev_id
,
6354 ath11k_wmi_process_csa_switch_count_event(ab
, ev
, vdev_ids
);
6360 ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
6363 const struct wmi_pdev_radar_ev
*ev
;
6367 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
6370 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
6374 ev
= tb
[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT
];
6377 ath11k_warn(ab
, "failed to fetch pdev dfs radar detected ev");
6382 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
6383 "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
6384 ev
->pdev_id
, ev
->detection_mode
, ev
->chan_freq
, ev
->chan_width
,
6385 ev
->detector_id
, ev
->segment_id
, ev
->timestamp
, ev
->is_chirp
,
6386 ev
->freq_offset
, ev
->sidx
);
6388 ar
= ath11k_mac_get_ar_by_pdev_id(ab
, ev
->pdev_id
);
6391 ath11k_warn(ab
, "radar detected in invalid pdev %d\n",
6396 ath11k_dbg(ar
->ab
, ATH11K_DBG_REG
, "DFS Radar Detected in pdev %d\n",
6399 if (ar
->dfs_block_radar_events
)
6400 ath11k_info(ab
, "DFS Radar detected, but ignored as requested\n");
6402 ieee80211_radar_detected(ar
->hw
);
6409 ath11k_wmi_pdev_temperature_event(struct ath11k_base
*ab
,
6410 struct sk_buff
*skb
)
6413 struct wmi_pdev_temperature_event ev
= {0};
6415 if (ath11k_pull_pdev_temp_ev(ab
, skb
->data
, skb
->len
, &ev
) != 0) {
6416 ath11k_warn(ab
, "failed to extract pdev temperature event");
6420 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
6421 "pdev temperature ev temp %d pdev_id %d\n", ev
.temp
, ev
.pdev_id
);
6423 ar
= ath11k_mac_get_ar_by_pdev_id(ab
, ev
.pdev_id
);
6425 ath11k_warn(ab
, "invalid pdev id in pdev temperature ev %d", ev
.pdev_id
);
6429 ath11k_thermal_event_temperature(ar
, ev
.temp
);
6432 static void ath11k_wmi_tlv_op_rx(struct ath11k_base
*ab
, struct sk_buff
*skb
)
6434 struct wmi_cmd_hdr
*cmd_hdr
;
6435 enum wmi_tlv_event_id id
;
6437 cmd_hdr
= (struct wmi_cmd_hdr
*)skb
->data
;
6438 id
= FIELD_GET(WMI_CMD_HDR_CMD_ID
, (cmd_hdr
->cmd_id
));
6440 if (skb_pull(skb
, sizeof(struct wmi_cmd_hdr
)) == NULL
)
6444 /* Process all the WMI events here */
6445 case WMI_SERVICE_READY_EVENTID
:
6446 ath11k_service_ready_event(ab
, skb
);
6448 case WMI_SERVICE_READY_EXT_EVENTID
:
6449 ath11k_service_ready_ext_event(ab
, skb
);
6451 case WMI_SERVICE_READY_EXT2_EVENTID
:
6452 ath11k_service_ready_ext2_event(ab
, skb
);
6454 case WMI_REG_CHAN_LIST_CC_EVENTID
:
6455 ath11k_reg_chan_list_event(ab
, skb
);
6457 case WMI_READY_EVENTID
:
6458 ath11k_ready_event(ab
, skb
);
6460 case WMI_PEER_DELETE_RESP_EVENTID
:
6461 ath11k_peer_delete_resp_event(ab
, skb
);
6463 case WMI_VDEV_START_RESP_EVENTID
:
6464 ath11k_vdev_start_resp_event(ab
, skb
);
6466 case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID
:
6467 ath11k_bcn_tx_status_event(ab
, skb
);
6469 case WMI_VDEV_STOPPED_EVENTID
:
6470 ath11k_vdev_stopped_event(ab
, skb
);
6472 case WMI_MGMT_RX_EVENTID
:
6473 ath11k_mgmt_rx_event(ab
, skb
);
6474 /* mgmt_rx_event() owns the skb now! */
6476 case WMI_MGMT_TX_COMPLETION_EVENTID
:
6477 ath11k_mgmt_tx_compl_event(ab
, skb
);
6479 case WMI_SCAN_EVENTID
:
6480 ath11k_scan_event(ab
, skb
);
6482 case WMI_PEER_STA_KICKOUT_EVENTID
:
6483 ath11k_peer_sta_kickout_event(ab
, skb
);
6485 case WMI_ROAM_EVENTID
:
6486 ath11k_roam_event(ab
, skb
);
6488 case WMI_CHAN_INFO_EVENTID
:
6489 ath11k_chan_info_event(ab
, skb
);
6491 case WMI_PDEV_BSS_CHAN_INFO_EVENTID
:
6492 ath11k_pdev_bss_chan_info_event(ab
, skb
);
6494 case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID
:
6495 ath11k_vdev_install_key_compl_event(ab
, skb
);
6497 case WMI_SERVICE_AVAILABLE_EVENTID
:
6498 ath11k_service_available_event(ab
, skb
);
6500 case WMI_PEER_ASSOC_CONF_EVENTID
:
6501 ath11k_peer_assoc_conf_event(ab
, skb
);
6503 case WMI_UPDATE_STATS_EVENTID
:
6504 ath11k_update_stats_event(ab
, skb
);
6506 case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID
:
6507 ath11k_pdev_ctl_failsafe_check_event(ab
, skb
);
6509 case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID
:
6510 ath11k_wmi_pdev_csa_switch_count_status_event(ab
, skb
);
6512 case WMI_PDEV_TEMPERATURE_EVENTID
:
6513 ath11k_wmi_pdev_temperature_event(ab
, skb
);
6515 case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID
:
6516 ath11k_wmi_pdev_dma_ring_buf_release_event(ab
, skb
);
6518 /* add Unsupported events here */
6519 case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID
:
6520 case WMI_VDEV_DELETE_RESP_EVENTID
:
6521 case WMI_PEER_OPER_MODE_CHANGE_EVENTID
:
6522 case WMI_TWT_ENABLE_EVENTID
:
6523 case WMI_TWT_DISABLE_EVENTID
:
6524 case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID
:
6525 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
6526 "ignoring unsupported event 0x%x\n", id
);
6528 case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID
:
6529 ath11k_wmi_pdev_dfs_radar_detected_event(ab
, skb
);
6531 /* TODO: Add remaining events */
6533 ath11k_dbg(ab
, ATH11K_DBG_WMI
, "Unknown eventid: 0x%x\n", id
);
6541 static int ath11k_connect_pdev_htc_service(struct ath11k_base
*ab
,
6545 u32 svc_id
[] = { ATH11K_HTC_SVC_ID_WMI_CONTROL
,
6546 ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1
,
6547 ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2
};
6549 struct ath11k_htc_svc_conn_req conn_req
;
6550 struct ath11k_htc_svc_conn_resp conn_resp
;
6552 memset(&conn_req
, 0, sizeof(conn_req
));
6553 memset(&conn_resp
, 0, sizeof(conn_resp
));
6555 /* these fields are the same for all service endpoints */
6556 conn_req
.ep_ops
.ep_tx_complete
= ath11k_wmi_htc_tx_complete
;
6557 conn_req
.ep_ops
.ep_rx_complete
= ath11k_wmi_tlv_op_rx
;
6558 conn_req
.ep_ops
.ep_tx_credits
= ath11k_wmi_op_ep_tx_credits
;
6560 /* connect to control service */
6561 conn_req
.service_id
= svc_id
[pdev_idx
];
6563 status
= ath11k_htc_connect_service(&ab
->htc
, &conn_req
, &conn_resp
);
6565 ath11k_warn(ab
, "failed to connect to WMI CONTROL service status: %d\n",
6570 ab
->wmi_ab
.wmi_endpoint_id
[pdev_idx
] = conn_resp
.eid
;
6571 ab
->wmi_ab
.wmi
[pdev_idx
].eid
= conn_resp
.eid
;
6572 ab
->wmi_ab
.max_msg_len
[pdev_idx
] = conn_resp
.max_msg_len
;
6578 ath11k_wmi_send_unit_test_cmd(struct ath11k
*ar
,
6579 struct wmi_unit_test_cmd ut_cmd
,
6582 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
6583 struct wmi_unit_test_cmd
*cmd
;
6584 struct sk_buff
*skb
;
6585 struct wmi_tlv
*tlv
;
6588 int buf_len
, arg_len
;
6592 arg_len
= sizeof(u32
) * ut_cmd
.num_args
;
6593 buf_len
= sizeof(ut_cmd
) + arg_len
+ TLV_HDR_SIZE
;
6595 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, buf_len
);
6599 cmd
= (struct wmi_unit_test_cmd
*)skb
->data
;
6600 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_UNIT_TEST_CMD
) |
6601 FIELD_PREP(WMI_TLV_LEN
, sizeof(ut_cmd
) - TLV_HDR_SIZE
);
6603 cmd
->vdev_id
= ut_cmd
.vdev_id
;
6604 cmd
->module_id
= ut_cmd
.module_id
;
6605 cmd
->num_args
= ut_cmd
.num_args
;
6606 cmd
->diag_token
= ut_cmd
.diag_token
;
6608 ptr
= skb
->data
+ sizeof(ut_cmd
);
6611 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_UINT32
) |
6612 FIELD_PREP(WMI_TLV_LEN
, arg_len
);
6614 ptr
+= TLV_HDR_SIZE
;
6617 for (i
= 0; i
< ut_cmd
.num_args
; i
++)
6618 ut_cmd_args
[i
] = test_args
[i
];
6620 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_UNIT_TEST_CMDID
);
6623 ath11k_warn(ar
->ab
, "failed to send WMI_UNIT_TEST CMD :%d\n",
6628 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
6629 "WMI unit test : module %d vdev %d n_args %d token %d\n",
6630 cmd
->module_id
, cmd
->vdev_id
, cmd
->num_args
,
6636 int ath11k_wmi_simulate_radar(struct ath11k
*ar
)
6638 struct ath11k_vif
*arvif
;
6639 u32 dfs_args
[DFS_MAX_TEST_ARGS
];
6640 struct wmi_unit_test_cmd wmi_ut
;
6641 bool arvif_found
= false;
6643 list_for_each_entry(arvif
, &ar
->arvifs
, list
) {
6644 if (arvif
->is_started
&& arvif
->vdev_type
== WMI_VDEV_TYPE_AP
) {
6653 dfs_args
[DFS_TEST_CMDID
] = 0;
6654 dfs_args
[DFS_TEST_PDEV_ID
] = ar
->pdev
->pdev_id
;
6655 /* Currently we could pass segment_id(b0 - b1), chirp(b2)
6656 * freq offset (b3 - b10) to unit test. For simulation
6657 * purpose this can be set to 0 which is valid.
6659 dfs_args
[DFS_TEST_RADAR_PARAM
] = 0;
6661 wmi_ut
.vdev_id
= arvif
->vdev_id
;
6662 wmi_ut
.module_id
= DFS_UNIT_TEST_MODULE
;
6663 wmi_ut
.num_args
= DFS_MAX_TEST_ARGS
;
6664 wmi_ut
.diag_token
= DFS_UNIT_TEST_TOKEN
;
6666 ath11k_dbg(ar
->ab
, ATH11K_DBG_REG
, "Triggering Radar Simulation\n");
6668 return ath11k_wmi_send_unit_test_cmd(ar
, wmi_ut
, dfs_args
);
6671 int ath11k_wmi_connect(struct ath11k_base
*ab
)
6676 wmi_ep_count
= ab
->htc
.wmi_ep_count
;
6677 if (wmi_ep_count
> ab
->hw_params
.max_radios
)
6680 for (i
= 0; i
< wmi_ep_count
; i
++)
6681 ath11k_connect_pdev_htc_service(ab
, i
);
6686 static void ath11k_wmi_pdev_detach(struct ath11k_base
*ab
, u8 pdev_id
)
6688 if (WARN_ON(pdev_id
>= MAX_RADIOS
))
6691 /* TODO: Deinit any pdev specific wmi resource */
6694 int ath11k_wmi_pdev_attach(struct ath11k_base
*ab
,
6697 struct ath11k_pdev_wmi
*wmi_handle
;
6699 if (pdev_id
>= ab
->hw_params
.max_radios
)
6702 wmi_handle
= &ab
->wmi_ab
.wmi
[pdev_id
];
6704 wmi_handle
->wmi_ab
= &ab
->wmi_ab
;
6707 /* TODO: Init remaining resource specific to pdev */
6712 int ath11k_wmi_attach(struct ath11k_base
*ab
)
6716 ret
= ath11k_wmi_pdev_attach(ab
, 0);
6721 ab
->wmi_ab
.preferred_hw_mode
= WMI_HOST_HW_MODE_MAX
;
6723 /* It's overwritten when service_ext_ready is handled */
6724 if (ab
->hw_params
.single_pdev_only
)
6725 ab
->wmi_ab
.preferred_hw_mode
= WMI_HOST_HW_MODE_SINGLE
;
6727 /* TODO: Init remaining wmi soc resources required */
6728 init_completion(&ab
->wmi_ab
.service_ready
);
6729 init_completion(&ab
->wmi_ab
.unified_ready
);
6734 void ath11k_wmi_detach(struct ath11k_base
*ab
)
6738 /* TODO: Deinit wmi resource specific to SOC as required */
6740 for (i
= 0; i
< ab
->htc
.wmi_ep_count
; i
++)
6741 ath11k_wmi_pdev_detach(ab
, i
);
6743 ath11k_wmi_free_dbring_caps(ab
);