// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */
5 #include <linux/skbuff.h>
6 #include <linux/ctype.h>
7 #include <net/mac80211.h>
8 #include <net/cfg80211.h>
9 #include <linux/completion.h>
10 #include <linux/if_ether.h>
11 #include <linux/types.h>
12 #include <linux/pci.h>
13 #include <linux/uuid.h>
14 #include <linux/time.h>
22 struct wmi_tlv_policy
{
26 struct wmi_tlv_svc_ready_parse
{
27 bool wmi_svc_bitmap_done
;
30 struct wmi_tlv_svc_rdy_ext_parse
{
31 struct ath11k_service_ext_param param
;
32 struct wmi_soc_mac_phy_hw_mode_caps
*hw_caps
;
33 struct wmi_hw_mode_capabilities
*hw_mode_caps
;
36 struct wmi_hw_mode_capabilities pref_hw_mode_caps
;
37 struct wmi_mac_phy_capabilities
*mac_phy_caps
;
39 struct wmi_soc_hal_reg_capabilities
*soc_hal_reg_caps
;
40 struct wmi_hal_reg_capabilities_ext
*ext_hal_reg_caps
;
41 u32 n_ext_hal_reg_caps
;
44 bool ext_hal_reg_done
;
47 struct wmi_tlv_rdy_parse
{
48 u32 num_extra_mac_addr
;
51 static const struct wmi_tlv_policy wmi_tlv_policies
[] = {
54 [WMI_TAG_ARRAY_UINT32
]
56 [WMI_TAG_SERVICE_READY_EVENT
]
57 = { .min_len
= sizeof(struct wmi_service_ready_event
) },
58 [WMI_TAG_SERVICE_READY_EXT_EVENT
]
59 = { .min_len
= sizeof(struct wmi_service_ready_ext_event
) },
60 [WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS
]
61 = { .min_len
= sizeof(struct wmi_soc_mac_phy_hw_mode_caps
) },
62 [WMI_TAG_SOC_HAL_REG_CAPABILITIES
]
63 = { .min_len
= sizeof(struct wmi_soc_hal_reg_capabilities
) },
64 [WMI_TAG_VDEV_START_RESPONSE_EVENT
]
65 = { .min_len
= sizeof(struct wmi_vdev_start_resp_event
) },
66 [WMI_TAG_PEER_DELETE_RESP_EVENT
]
67 = { .min_len
= sizeof(struct wmi_peer_delete_resp_event
) },
68 [WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT
]
69 = { .min_len
= sizeof(struct wmi_bcn_tx_status_event
) },
70 [WMI_TAG_VDEV_STOPPED_EVENT
]
71 = { .min_len
= sizeof(struct wmi_vdev_stopped_event
) },
72 [WMI_TAG_REG_CHAN_LIST_CC_EVENT
]
73 = { .min_len
= sizeof(struct wmi_reg_chan_list_cc_event
) },
75 = { .min_len
= sizeof(struct wmi_mgmt_rx_hdr
) },
76 [WMI_TAG_MGMT_TX_COMPL_EVENT
]
77 = { .min_len
= sizeof(struct wmi_mgmt_tx_compl_event
) },
79 = { .min_len
= sizeof(struct wmi_scan_event
) },
80 [WMI_TAG_PEER_STA_KICKOUT_EVENT
]
81 = { .min_len
= sizeof(struct wmi_peer_sta_kickout_event
) },
83 = { .min_len
= sizeof(struct wmi_roam_event
) },
84 [WMI_TAG_CHAN_INFO_EVENT
]
85 = { .min_len
= sizeof(struct wmi_chan_info_event
) },
86 [WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT
]
87 = { .min_len
= sizeof(struct wmi_pdev_bss_chan_info_event
) },
88 [WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT
]
89 = { .min_len
= sizeof(struct wmi_vdev_install_key_compl_event
) },
90 [WMI_TAG_READY_EVENT
] = {
91 .min_len
= sizeof(struct wmi_ready_event_min
) },
92 [WMI_TAG_SERVICE_AVAILABLE_EVENT
]
93 = {.min_len
= sizeof(struct wmi_service_available_event
) },
94 [WMI_TAG_PEER_ASSOC_CONF_EVENT
]
95 = { .min_len
= sizeof(struct wmi_peer_assoc_conf_event
) },
97 = { .min_len
= sizeof(struct wmi_stats_event
) },
98 [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT
]
99 = { .min_len
= sizeof(struct wmi_pdev_ctl_failsafe_chk_event
) },
102 #define PRIMAP(_hw_mode_) \
103 [_hw_mode_] = _hw_mode_##_PRI
105 static const int ath11k_hw_mode_pri_map
[] = {
106 PRIMAP(WMI_HOST_HW_MODE_SINGLE
),
107 PRIMAP(WMI_HOST_HW_MODE_DBS
),
108 PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE
),
109 PRIMAP(WMI_HOST_HW_MODE_SBS
),
110 PRIMAP(WMI_HOST_HW_MODE_DBS_SBS
),
111 PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS
),
113 PRIMAP(WMI_HOST_HW_MODE_MAX
),
117 ath11k_wmi_tlv_iter(struct ath11k_base
*ab
, const void *ptr
, size_t len
,
118 int (*iter
)(struct ath11k_base
*ab
, u16 tag
, u16 len
,
119 const void *ptr
, void *data
),
122 const void *begin
= ptr
;
123 const struct wmi_tlv
*tlv
;
124 u16 tlv_tag
, tlv_len
;
128 if (len
< sizeof(*tlv
)) {
129 ath11k_err(ab
, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
130 ptr
- begin
, len
, sizeof(*tlv
));
135 tlv_tag
= FIELD_GET(WMI_TLV_TAG
, tlv
->header
);
136 tlv_len
= FIELD_GET(WMI_TLV_LEN
, tlv
->header
);
141 ath11k_err(ab
, "wmi tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
142 tlv_tag
, ptr
- begin
, len
, tlv_len
);
146 if (tlv_tag
< ARRAY_SIZE(wmi_tlv_policies
) &&
147 wmi_tlv_policies
[tlv_tag
].min_len
&&
148 wmi_tlv_policies
[tlv_tag
].min_len
> tlv_len
) {
149 ath11k_err(ab
, "wmi tlv parse failure of tag %hhu at byte %zd (%hhu bytes is less than min length %zu)\n",
150 tlv_tag
, ptr
- begin
, tlv_len
,
151 wmi_tlv_policies
[tlv_tag
].min_len
);
155 ret
= iter(ab
, tlv_tag
, tlv_len
, ptr
, data
);
166 static int ath11k_wmi_tlv_iter_parse(struct ath11k_base
*ab
, u16 tag
, u16 len
,
167 const void *ptr
, void *data
)
169 const void **tb
= data
;
171 if (tag
< WMI_TAG_MAX
)
177 static int ath11k_wmi_tlv_parse(struct ath11k_base
*ar
, const void **tb
,
178 const void *ptr
, size_t len
)
180 return ath11k_wmi_tlv_iter(ar
, ptr
, len
, ath11k_wmi_tlv_iter_parse
,
185 ath11k_wmi_tlv_parse_alloc(struct ath11k_base
*ab
, const void *ptr
,
186 size_t len
, gfp_t gfp
)
191 tb
= kcalloc(WMI_TAG_MAX
, sizeof(*tb
), gfp
);
193 return ERR_PTR(-ENOMEM
);
195 ret
= ath11k_wmi_tlv_parse(ab
, tb
, ptr
, len
);
204 static int ath11k_wmi_cmd_send_nowait(struct ath11k_pdev_wmi
*wmi
, struct sk_buff
*skb
,
207 struct ath11k_skb_cb
*skb_cb
= ATH11K_SKB_CB(skb
);
208 struct ath11k_base
*ab
= wmi
->wmi_ab
->ab
;
209 struct wmi_cmd_hdr
*cmd_hdr
;
213 if (skb_push(skb
, sizeof(struct wmi_cmd_hdr
)) == NULL
)
216 cmd
|= FIELD_PREP(WMI_CMD_HDR_CMD_ID
, cmd_id
);
218 cmd_hdr
= (struct wmi_cmd_hdr
*)skb
->data
;
219 cmd_hdr
->cmd_id
= cmd
;
221 memset(skb_cb
, 0, sizeof(*skb_cb
));
222 ret
= ath11k_htc_send(&ab
->htc
, wmi
->eid
, skb
);
230 skb_pull(skb
, sizeof(struct wmi_cmd_hdr
));
234 int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi
*wmi
, struct sk_buff
*skb
,
237 struct ath11k_wmi_base
*wmi_sc
= wmi
->wmi_ab
;
238 int ret
= -EOPNOTSUPP
;
242 wait_event_timeout(wmi_sc
->tx_credits_wq
, ({
243 ret
= ath11k_wmi_cmd_send_nowait(wmi
, skb
, cmd_id
);
245 if (ret
&& test_bit(ATH11K_FLAG_CRASH_FLUSH
, &wmi_sc
->ab
->dev_flags
))
249 }), WMI_SEND_TIMEOUT_HZ
);
252 ath11k_warn(wmi_sc
->ab
, "wmi command %d timeout\n", cmd_id
);
257 static int ath11k_pull_svc_ready_ext(struct ath11k_pdev_wmi
*wmi_handle
,
259 struct ath11k_service_ext_param
*param
)
261 const struct wmi_service_ready_ext_event
*ev
= ptr
;
266 /* Move this to host based bitmap */
267 param
->default_conc_scan_config_bits
= ev
->default_conc_scan_config_bits
;
268 param
->default_fw_config_bits
= ev
->default_fw_config_bits
;
269 param
->he_cap_info
= ev
->he_cap_info
;
270 param
->mpdu_density
= ev
->mpdu_density
;
271 param
->max_bssid_rx_filters
= ev
->max_bssid_rx_filters
;
272 memcpy(¶m
->ppet
, &ev
->ppet
, sizeof(param
->ppet
));
278 ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi
*wmi_handle
,
279 struct wmi_soc_mac_phy_hw_mode_caps
*hw_caps
,
280 struct wmi_hw_mode_capabilities
*wmi_hw_mode_caps
,
281 struct wmi_soc_hal_reg_capabilities
*hal_reg_caps
,
282 struct wmi_mac_phy_capabilities
*wmi_mac_phy_caps
,
283 u8 hw_mode_id
, u8 phy_id
,
284 struct ath11k_pdev
*pdev
)
286 struct wmi_mac_phy_capabilities
*mac_phy_caps
;
287 struct ath11k_band_cap
*cap_band
;
288 struct ath11k_pdev_cap
*pdev_cap
= &pdev
->cap
;
290 u32 hw_idx
, phy_idx
= 0;
292 if (!hw_caps
|| !wmi_hw_mode_caps
|| !hal_reg_caps
)
295 for (hw_idx
= 0; hw_idx
< hw_caps
->num_hw_modes
; hw_idx
++) {
296 if (hw_mode_id
== wmi_hw_mode_caps
[hw_idx
].hw_mode_id
)
299 phy_map
= wmi_hw_mode_caps
[hw_idx
].phy_id_map
;
306 if (hw_idx
== hw_caps
->num_hw_modes
)
310 if (phy_id
>= hal_reg_caps
->num_phy
)
313 mac_phy_caps
= wmi_mac_phy_caps
+ phy_idx
;
315 pdev
->pdev_id
= mac_phy_caps
->pdev_id
;
316 pdev_cap
->supported_bands
= mac_phy_caps
->supported_bands
;
317 pdev_cap
->ampdu_density
= mac_phy_caps
->ampdu_density
;
319 /* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
320 * band to band for a single radio, need to see how this should be
323 if (mac_phy_caps
->supported_bands
& WMI_HOST_WLAN_2G_CAP
) {
324 pdev_cap
->tx_chain_mask
= mac_phy_caps
->tx_chain_mask_2g
;
325 pdev_cap
->rx_chain_mask
= mac_phy_caps
->rx_chain_mask_2g
;
326 } else if (mac_phy_caps
->supported_bands
& WMI_HOST_WLAN_5G_CAP
) {
327 pdev_cap
->vht_cap
= mac_phy_caps
->vht_cap_info_5g
;
328 pdev_cap
->vht_mcs
= mac_phy_caps
->vht_supp_mcs_5g
;
329 pdev_cap
->he_mcs
= mac_phy_caps
->he_supp_mcs_5g
;
330 pdev_cap
->tx_chain_mask
= mac_phy_caps
->tx_chain_mask_5g
;
331 pdev_cap
->rx_chain_mask
= mac_phy_caps
->rx_chain_mask_5g
;
336 /* tx/rx chainmask reported from fw depends on the actual hw chains used,
337 * For example, for 4x4 capable macphys, first 4 chains can be used for first
338 * mac and the remaing 4 chains can be used for the second mac or vice-versa.
339 * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
340 * will be advertised for second mac or vice-versa. Compute the shift value for
341 * for tx/rx chainmask which will be used to advertise supported ht/vht rates to
344 pdev_cap
->tx_chain_mask_shift
=
345 find_first_bit((unsigned long *)&pdev_cap
->tx_chain_mask
, 32);
346 pdev_cap
->rx_chain_mask_shift
=
347 find_first_bit((unsigned long *)&pdev_cap
->rx_chain_mask
, 32);
349 cap_band
= &pdev_cap
->band
[NL80211_BAND_2GHZ
];
350 cap_band
->max_bw_supported
= mac_phy_caps
->max_bw_supported_2g
;
351 cap_band
->ht_cap_info
= mac_phy_caps
->ht_cap_info_2g
;
352 cap_band
->he_cap_info
[0] = mac_phy_caps
->he_cap_info_2g
;
353 cap_band
->he_cap_info
[1] = mac_phy_caps
->he_cap_info_2g_ext
;
354 cap_band
->he_mcs
= mac_phy_caps
->he_supp_mcs_2g
;
355 memcpy(cap_band
->he_cap_phy_info
, &mac_phy_caps
->he_cap_phy_info_2g
,
356 sizeof(u32
) * PSOC_HOST_MAX_PHY_SIZE
);
357 memcpy(&cap_band
->he_ppet
, &mac_phy_caps
->he_ppet2g
,
358 sizeof(struct ath11k_ppe_threshold
));
360 cap_band
= &pdev_cap
->band
[NL80211_BAND_5GHZ
];
361 cap_band
->max_bw_supported
= mac_phy_caps
->max_bw_supported_5g
;
362 cap_band
->ht_cap_info
= mac_phy_caps
->ht_cap_info_5g
;
363 cap_band
->he_cap_info
[0] = mac_phy_caps
->he_cap_info_5g
;
364 cap_band
->he_cap_info
[1] = mac_phy_caps
->he_cap_info_5g_ext
;
365 cap_band
->he_mcs
= mac_phy_caps
->he_supp_mcs_5g
;
366 memcpy(cap_band
->he_cap_phy_info
, &mac_phy_caps
->he_cap_phy_info_5g
,
367 sizeof(u32
) * PSOC_HOST_MAX_PHY_SIZE
);
368 memcpy(&cap_band
->he_ppet
, &mac_phy_caps
->he_ppet5g
,
369 sizeof(struct ath11k_ppe_threshold
));
375 ath11k_pull_reg_cap_svc_rdy_ext(struct ath11k_pdev_wmi
*wmi_handle
,
376 struct wmi_soc_hal_reg_capabilities
*reg_caps
,
377 struct wmi_hal_reg_capabilities_ext
*wmi_ext_reg_cap
,
379 struct ath11k_hal_reg_capabilities_ext
*param
)
381 struct wmi_hal_reg_capabilities_ext
*ext_reg_cap
;
383 if (!reg_caps
|| !wmi_ext_reg_cap
)
386 if (phy_idx
>= reg_caps
->num_phy
)
389 ext_reg_cap
= &wmi_ext_reg_cap
[phy_idx
];
391 param
->phy_id
= ext_reg_cap
->phy_id
;
392 param
->eeprom_reg_domain
= ext_reg_cap
->eeprom_reg_domain
;
393 param
->eeprom_reg_domain_ext
=
394 ext_reg_cap
->eeprom_reg_domain_ext
;
395 param
->regcap1
= ext_reg_cap
->regcap1
;
396 param
->regcap2
= ext_reg_cap
->regcap2
;
397 /* check if param->wireless_mode is needed */
398 param
->low_2ghz_chan
= ext_reg_cap
->low_2ghz_chan
;
399 param
->high_2ghz_chan
= ext_reg_cap
->high_2ghz_chan
;
400 param
->low_5ghz_chan
= ext_reg_cap
->low_5ghz_chan
;
401 param
->high_5ghz_chan
= ext_reg_cap
->high_5ghz_chan
;
406 static int ath11k_pull_service_ready_tlv(struct ath11k_base
*ab
,
408 struct ath11k_targ_cap
*cap
)
410 const struct wmi_service_ready_event
*ev
= evt_buf
;
413 ath11k_err(ab
, "%s: failed by NULL param\n",
418 cap
->phy_capability
= ev
->phy_capability
;
419 cap
->max_frag_entry
= ev
->max_frag_entry
;
420 cap
->num_rf_chains
= ev
->num_rf_chains
;
421 cap
->ht_cap_info
= ev
->ht_cap_info
;
422 cap
->vht_cap_info
= ev
->vht_cap_info
;
423 cap
->vht_supp_mcs
= ev
->vht_supp_mcs
;
424 cap
->hw_min_tx_power
= ev
->hw_min_tx_power
;
425 cap
->hw_max_tx_power
= ev
->hw_max_tx_power
;
426 cap
->sys_cap_info
= ev
->sys_cap_info
;
427 cap
->min_pkt_size_enable
= ev
->min_pkt_size_enable
;
428 cap
->max_bcn_ie_size
= ev
->max_bcn_ie_size
;
429 cap
->max_num_scan_channels
= ev
->max_num_scan_channels
;
430 cap
->max_supported_macs
= ev
->max_supported_macs
;
431 cap
->wmi_fw_sub_feat_caps
= ev
->wmi_fw_sub_feat_caps
;
432 cap
->txrx_chainmask
= ev
->txrx_chainmask
;
433 cap
->default_dbs_hw_mode_index
= ev
->default_dbs_hw_mode_index
;
434 cap
->num_msdu_desc
= ev
->num_msdu_desc
;
439 /* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
440 * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
443 static void ath11k_wmi_service_bitmap_copy(struct ath11k_pdev_wmi
*wmi
,
444 const u32
*wmi_svc_bm
)
448 for (i
= 0, j
= 0; i
< WMI_SERVICE_BM_SIZE
&& j
< WMI_MAX_SERVICE
; i
++) {
450 if (wmi_svc_bm
[i
] & BIT(j
% WMI_SERVICE_BITS_IN_SIZE32
))
451 set_bit(j
, wmi
->wmi_ab
->svc_map
);
452 } while (++j
% WMI_SERVICE_BITS_IN_SIZE32
);
456 static int ath11k_wmi_tlv_svc_rdy_parse(struct ath11k_base
*ab
, u16 tag
, u16 len
,
457 const void *ptr
, void *data
)
459 struct wmi_tlv_svc_ready_parse
*svc_ready
= data
;
460 struct ath11k_pdev_wmi
*wmi_handle
= &ab
->wmi_ab
.wmi
[0];
464 case WMI_TAG_SERVICE_READY_EVENT
:
465 if (ath11k_pull_service_ready_tlv(ab
, ptr
, &ab
->target_caps
))
469 case WMI_TAG_ARRAY_UINT32
:
470 if (!svc_ready
->wmi_svc_bitmap_done
) {
471 expect_len
= WMI_SERVICE_BM_SIZE
* sizeof(u32
);
472 if (len
< expect_len
) {
473 ath11k_warn(ab
, "invalid len %d for the tag 0x%x\n",
478 ath11k_wmi_service_bitmap_copy(wmi_handle
, ptr
);
480 svc_ready
->wmi_svc_bitmap_done
= true;
490 static int ath11k_service_ready_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
492 struct wmi_tlv_svc_ready_parse svc_ready
= { };
495 ret
= ath11k_wmi_tlv_iter(ab
, skb
->data
, skb
->len
,
496 ath11k_wmi_tlv_svc_rdy_parse
,
499 ath11k_warn(ab
, "failed to parse tlv %d\n", ret
);
506 struct sk_buff
*ath11k_wmi_alloc_skb(struct ath11k_wmi_base
*wmi_sc
, u32 len
)
509 struct ath11k_base
*ab
= wmi_sc
->ab
;
510 u32 round_len
= roundup(len
, 4);
512 skb
= ath11k_htc_alloc_skb(ab
, WMI_SKB_HEADROOM
+ round_len
);
516 skb_reserve(skb
, WMI_SKB_HEADROOM
);
517 if (!IS_ALIGNED((unsigned long)skb
->data
, 4))
518 ath11k_warn(ab
, "unaligned WMI skb data\n");
520 skb_put(skb
, round_len
);
521 memset(skb
->data
, 0, round_len
);
526 int ath11k_wmi_mgmt_send(struct ath11k
*ar
, u32 vdev_id
, u32 buf_id
,
527 struct sk_buff
*frame
)
529 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
530 struct wmi_mgmt_send_cmd
*cmd
;
531 struct wmi_tlv
*frame_tlv
;
536 buf_len
= frame
->len
< WMI_MGMT_SEND_DOWNLD_LEN
?
537 frame
->len
: WMI_MGMT_SEND_DOWNLD_LEN
;
539 len
= sizeof(*cmd
) + sizeof(*frame_tlv
) + roundup(buf_len
, 4);
541 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
545 cmd
= (struct wmi_mgmt_send_cmd
*)skb
->data
;
546 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_MGMT_TX_SEND_CMD
) |
547 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
548 cmd
->vdev_id
= vdev_id
;
549 cmd
->desc_id
= buf_id
;
551 cmd
->paddr_lo
= lower_32_bits(ATH11K_SKB_CB(frame
)->paddr
);
552 cmd
->paddr_hi
= upper_32_bits(ATH11K_SKB_CB(frame
)->paddr
);
553 cmd
->frame_len
= frame
->len
;
554 cmd
->buf_len
= buf_len
;
555 cmd
->tx_params_valid
= 0;
557 frame_tlv
= (struct wmi_tlv
*)(skb
->data
+ sizeof(*cmd
));
558 frame_tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_BYTE
) |
559 FIELD_PREP(WMI_TLV_LEN
, buf_len
);
561 memcpy(frame_tlv
->value
, frame
->data
, buf_len
);
563 ath11k_ce_byte_swap(frame_tlv
->value
, buf_len
);
565 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_MGMT_TX_SEND_CMDID
);
568 "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
575 int ath11k_wmi_vdev_create(struct ath11k
*ar
, u8
*macaddr
,
576 struct vdev_create_params
*param
)
578 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
579 struct wmi_vdev_create_cmd
*cmd
;
581 struct wmi_vdev_txrx_streams
*txrx_streams
;
586 /* It can be optimized my sending tx/rx chain configuration
587 * only for supported bands instead of always sending it for
590 len
= sizeof(*cmd
) + TLV_HDR_SIZE
+
591 (WMI_NUM_SUPPORTED_BAND_MAX
* sizeof(*txrx_streams
));
593 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
597 cmd
= (struct wmi_vdev_create_cmd
*)skb
->data
;
598 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_CREATE_CMD
) |
599 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
601 cmd
->vdev_id
= param
->if_id
;
602 cmd
->vdev_type
= param
->type
;
603 cmd
->vdev_subtype
= param
->subtype
;
604 cmd
->num_cfg_txrx_streams
= WMI_NUM_SUPPORTED_BAND_MAX
;
605 cmd
->pdev_id
= param
->pdev_id
;
606 cmd
->flags
= param
->flags
;
607 cmd
->vdevid_trans
= param
->vdevid_trans
;
608 ether_addr_copy(cmd
->vdev_macaddr
.addr
, macaddr
);
610 ptr
= skb
->data
+ sizeof(*cmd
);
611 len
= WMI_NUM_SUPPORTED_BAND_MAX
* sizeof(*txrx_streams
);
614 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_STRUCT
) |
615 FIELD_PREP(WMI_TLV_LEN
, len
);
619 len
= sizeof(*txrx_streams
);
620 txrx_streams
->tlv_header
=
621 FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_TXRX_STREAMS
) |
622 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
623 txrx_streams
->band
= WMI_TPC_CHAINMASK_CONFIG_BAND_2G
;
624 txrx_streams
->supported_tx_streams
=
625 param
->chains
[NL80211_BAND_2GHZ
].tx
;
626 txrx_streams
->supported_rx_streams
=
627 param
->chains
[NL80211_BAND_2GHZ
].rx
;
630 txrx_streams
->tlv_header
=
631 FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_TXRX_STREAMS
) |
632 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
633 txrx_streams
->band
= WMI_TPC_CHAINMASK_CONFIG_BAND_5G
;
634 txrx_streams
->supported_tx_streams
=
635 param
->chains
[NL80211_BAND_5GHZ
].tx
;
636 txrx_streams
->supported_rx_streams
=
637 param
->chains
[NL80211_BAND_5GHZ
].rx
;
639 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_VDEV_CREATE_CMDID
);
642 "failed to submit WMI_VDEV_CREATE_CMDID\n");
646 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
647 "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
648 param
->if_id
, param
->type
, param
->subtype
,
649 macaddr
, param
->pdev_id
);
654 int ath11k_wmi_vdev_delete(struct ath11k
*ar
, u8 vdev_id
)
656 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
657 struct wmi_vdev_delete_cmd
*cmd
;
661 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
665 cmd
= (struct wmi_vdev_delete_cmd
*)skb
->data
;
666 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_DELETE_CMD
) |
667 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
668 cmd
->vdev_id
= vdev_id
;
670 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_VDEV_DELETE_CMDID
);
672 ath11k_warn(ar
->ab
, "failed to submit WMI_VDEV_DELETE_CMDID\n");
676 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
, "WMI vdev delete id %d\n", vdev_id
);
681 int ath11k_wmi_vdev_stop(struct ath11k
*ar
, u8 vdev_id
)
683 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
684 struct wmi_vdev_stop_cmd
*cmd
;
688 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
692 cmd
= (struct wmi_vdev_stop_cmd
*)skb
->data
;
694 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_STOP_CMD
) |
695 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
696 cmd
->vdev_id
= vdev_id
;
698 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_VDEV_STOP_CMDID
);
700 ath11k_warn(ar
->ab
, "failed to submit WMI_VDEV_STOP cmd\n");
704 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
, "WMI vdev stop id 0x%x\n", vdev_id
);
709 int ath11k_wmi_vdev_down(struct ath11k
*ar
, u8 vdev_id
)
711 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
712 struct wmi_vdev_down_cmd
*cmd
;
716 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
720 cmd
= (struct wmi_vdev_down_cmd
*)skb
->data
;
722 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_DOWN_CMD
) |
723 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
724 cmd
->vdev_id
= vdev_id
;
726 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_VDEV_DOWN_CMDID
);
728 ath11k_warn(ar
->ab
, "failed to submit WMI_VDEV_DOWN cmd\n");
732 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
, "WMI vdev down id 0x%x\n", vdev_id
);
737 static void ath11k_wmi_put_wmi_channel(struct wmi_channel
*chan
,
738 struct wmi_vdev_start_req_arg
*arg
)
740 memset(chan
, 0, sizeof(*chan
));
742 chan
->mhz
= arg
->channel
.freq
;
743 chan
->band_center_freq1
= arg
->channel
.band_center_freq1
;
744 if (arg
->channel
.mode
== MODE_11AC_VHT80_80
)
745 chan
->band_center_freq2
= arg
->channel
.band_center_freq2
;
747 chan
->band_center_freq2
= 0;
749 chan
->info
|= FIELD_PREP(WMI_CHAN_INFO_MODE
, arg
->channel
.mode
);
750 if (arg
->channel
.passive
)
751 chan
->info
|= WMI_CHAN_INFO_PASSIVE
;
752 if (arg
->channel
.allow_ibss
)
753 chan
->info
|= WMI_CHAN_INFO_ADHOC_ALLOWED
;
754 if (arg
->channel
.allow_ht
)
755 chan
->info
|= WMI_CHAN_INFO_ALLOW_HT
;
756 if (arg
->channel
.allow_vht
)
757 chan
->info
|= WMI_CHAN_INFO_ALLOW_VHT
;
758 if (arg
->channel
.allow_he
)
759 chan
->info
|= WMI_CHAN_INFO_ALLOW_HE
;
760 if (arg
->channel
.ht40plus
)
761 chan
->info
|= WMI_CHAN_INFO_HT40_PLUS
;
762 if (arg
->channel
.chan_radar
)
763 chan
->info
|= WMI_CHAN_INFO_DFS
;
764 if (arg
->channel
.freq2_radar
)
765 chan
->info
|= WMI_CHAN_INFO_DFS_FREQ2
;
767 chan
->reg_info_1
= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR
,
768 arg
->channel
.max_power
) |
769 FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR
,
770 arg
->channel
.max_reg_power
);
772 chan
->reg_info_2
= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX
,
773 arg
->channel
.max_antenna_gain
) |
774 FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR
,
775 arg
->channel
.max_power
);
778 int ath11k_wmi_vdev_start(struct ath11k
*ar
, struct wmi_vdev_start_req_arg
*arg
,
781 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
782 struct wmi_vdev_start_request_cmd
*cmd
;
784 struct wmi_channel
*chan
;
789 if (WARN_ON(arg
->ssid_len
> sizeof(cmd
->ssid
.ssid
)))
792 len
= sizeof(*cmd
) + sizeof(*chan
) + TLV_HDR_SIZE
;
794 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
798 cmd
= (struct wmi_vdev_start_request_cmd
*)skb
->data
;
799 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
800 WMI_TAG_VDEV_START_REQUEST_CMD
) |
801 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
802 cmd
->vdev_id
= arg
->vdev_id
;
803 cmd
->beacon_interval
= arg
->bcn_intval
;
804 cmd
->bcn_tx_rate
= arg
->bcn_tx_rate
;
805 cmd
->dtim_period
= arg
->dtim_period
;
806 cmd
->num_noa_descriptors
= arg
->num_noa_descriptors
;
807 cmd
->preferred_rx_streams
= arg
->pref_rx_streams
;
808 cmd
->preferred_tx_streams
= arg
->pref_tx_streams
;
809 cmd
->cac_duration_ms
= arg
->cac_duration_ms
;
810 cmd
->regdomain
= arg
->regdomain
;
811 cmd
->he_ops
= arg
->he_ops
;
815 cmd
->ssid
.ssid_len
= arg
->ssid_len
;
816 memcpy(cmd
->ssid
.ssid
, arg
->ssid
, arg
->ssid_len
);
818 if (arg
->hidden_ssid
)
819 cmd
->flags
|= WMI_VDEV_START_HIDDEN_SSID
;
820 if (arg
->pmf_enabled
)
821 cmd
->flags
|= WMI_VDEV_START_PMF_ENABLED
;
824 cmd
->flags
|= WMI_VDEV_START_LDPC_RX_ENABLED
;
826 ptr
= skb
->data
+ sizeof(*cmd
);
829 ath11k_wmi_put_wmi_channel(chan
, arg
);
831 chan
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_CHANNEL
) |
832 FIELD_PREP(WMI_TLV_LEN
,
833 sizeof(*chan
) - TLV_HDR_SIZE
);
834 ptr
+= sizeof(*chan
);
837 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_STRUCT
) |
838 FIELD_PREP(WMI_TLV_LEN
, 0);
840 /* Note: This is a nested TLV containing:
841 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
847 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
848 WMI_VDEV_RESTART_REQUEST_CMDID
);
850 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
851 WMI_VDEV_START_REQUEST_CMDID
);
853 ath11k_warn(ar
->ab
, "failed to submit vdev_%s cmd\n",
854 restart
? "restart" : "start");
858 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
859 restart
? "restart" : "start", arg
->vdev_id
,
860 arg
->channel
.freq
, arg
->channel
.mode
);
865 int ath11k_wmi_vdev_up(struct ath11k
*ar
, struct vdev_up_params
*params
)
867 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
868 struct wmi_vdev_up_cmd
*cmd
;
872 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
876 cmd
= (struct wmi_vdev_up_cmd
*)skb
->data
;
878 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_UP_CMD
) |
879 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
880 cmd
->vdev_id
= params
->vdev_id
;
881 cmd
->vdev_assoc_id
= params
->aid
;
882 cmd
->profile_idx
= params
->profile_idx
;
883 cmd
->profile_num
= params
->profile_num
;
885 if (params
->trans_bssid
)
886 ether_addr_copy(cmd
->trans_bssid
.addr
, params
->trans_bssid
);
887 ether_addr_copy(cmd
->vdev_bssid
.addr
, params
->bssid
);
889 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_VDEV_UP_CMDID
);
891 ath11k_warn(ar
->ab
, "failed to submit WMI_VDEV_UP cmd\n");
895 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
896 "WMI mgmt vdev up id 0x%x assoc id %d idx %d num %d bssid %pM trans_bssid %pM\n",
897 params
->vdev_id
, params
->aid
, params
->profile_idx
, params
->profile_num
,
898 params
->bssid
, params
->trans_bssid
);
903 int ath11k_wmi_send_peer_create_cmd(struct ath11k
*ar
,
904 struct peer_create_params
*param
)
906 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
907 struct wmi_peer_create_cmd
*cmd
;
911 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
915 cmd
= (struct wmi_peer_create_cmd
*)skb
->data
;
916 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PEER_CREATE_CMD
) |
917 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
919 ether_addr_copy(cmd
->peer_macaddr
.addr
, param
->peer_addr
);
920 cmd
->peer_type
= param
->peer_type
;
921 cmd
->vdev_id
= param
->vdev_id
;
923 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PEER_CREATE_CMDID
);
925 ath11k_warn(ar
->ab
, "failed to submit WMI_PEER_CREATE cmd\n");
929 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
930 "WMI peer create vdev_id %d peer_addr %pM\n",
931 param
->vdev_id
, param
->peer_addr
);
936 int ath11k_wmi_send_peer_delete_cmd(struct ath11k
*ar
,
937 const u8
*peer_addr
, u8 vdev_id
)
939 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
940 struct wmi_peer_delete_cmd
*cmd
;
944 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
948 cmd
= (struct wmi_peer_delete_cmd
*)skb
->data
;
949 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PEER_DELETE_CMD
) |
950 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
952 ether_addr_copy(cmd
->peer_macaddr
.addr
, peer_addr
);
953 cmd
->vdev_id
= vdev_id
;
955 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
956 "WMI peer delete vdev_id %d peer_addr %pM\n",
959 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PEER_DELETE_CMDID
);
961 ath11k_warn(ar
->ab
, "failed to send WMI_PEER_DELETE cmd\n");
968 int ath11k_wmi_send_pdev_set_regdomain(struct ath11k
*ar
,
969 struct pdev_set_regdomain_params
*param
)
971 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
972 struct wmi_pdev_set_regdomain_cmd
*cmd
;
976 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
980 cmd
= (struct wmi_pdev_set_regdomain_cmd
*)skb
->data
;
981 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
982 WMI_TAG_PDEV_SET_REGDOMAIN_CMD
) |
983 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
985 cmd
->reg_domain
= param
->current_rd_in_use
;
986 cmd
->reg_domain_2g
= param
->current_rd_2g
;
987 cmd
->reg_domain_5g
= param
->current_rd_5g
;
988 cmd
->conformance_test_limit_2g
= param
->ctl_2g
;
989 cmd
->conformance_test_limit_5g
= param
->ctl_5g
;
990 cmd
->dfs_domain
= param
->dfs_domain
;
991 cmd
->pdev_id
= param
->pdev_id
;
993 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
994 "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
995 param
->current_rd_in_use
, param
->current_rd_2g
,
996 param
->current_rd_5g
, param
->dfs_domain
, param
->pdev_id
);
998 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PDEV_SET_REGDOMAIN_CMDID
);
1001 "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
1008 int ath11k_wmi_set_peer_param(struct ath11k
*ar
, const u8
*peer_addr
,
1009 u32 vdev_id
, u32 param_id
, u32 param_val
)
1011 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1012 struct wmi_peer_set_param_cmd
*cmd
;
1013 struct sk_buff
*skb
;
1016 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1020 cmd
= (struct wmi_peer_set_param_cmd
*)skb
->data
;
1021 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PEER_SET_PARAM_CMD
) |
1022 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1023 ether_addr_copy(cmd
->peer_macaddr
.addr
, peer_addr
);
1024 cmd
->vdev_id
= vdev_id
;
1025 cmd
->param_id
= param_id
;
1026 cmd
->param_value
= param_val
;
1028 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PEER_SET_PARAM_CMDID
);
1030 ath11k_warn(ar
->ab
, "failed to send WMI_PEER_SET_PARAM cmd\n");
1034 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1035 "WMI vdev %d peer 0x%pM set param %d value %d\n",
1036 vdev_id
, peer_addr
, param_id
, param_val
);
1041 int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k
*ar
,
1042 u8 peer_addr
[ETH_ALEN
],
1043 struct peer_flush_params
*param
)
1045 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1046 struct wmi_peer_flush_tids_cmd
*cmd
;
1047 struct sk_buff
*skb
;
1050 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1054 cmd
= (struct wmi_peer_flush_tids_cmd
*)skb
->data
;
1055 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PEER_FLUSH_TIDS_CMD
) |
1056 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1058 ether_addr_copy(cmd
->peer_macaddr
.addr
, peer_addr
);
1059 cmd
->peer_tid_bitmap
= param
->peer_tid_bitmap
;
1060 cmd
->vdev_id
= param
->vdev_id
;
1062 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PEER_FLUSH_TIDS_CMDID
);
1065 "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
1069 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1070 "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
1071 param
->vdev_id
, peer_addr
, param
->peer_tid_bitmap
);
1076 int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k
*ar
,
1077 int vdev_id
, const u8
*addr
,
1078 dma_addr_t paddr
, u8 tid
,
1079 u8 ba_window_size_valid
,
1082 struct wmi_peer_reorder_queue_setup_cmd
*cmd
;
1083 struct sk_buff
*skb
;
1086 skb
= ath11k_wmi_alloc_skb(ar
->wmi
->wmi_ab
, sizeof(*cmd
));
1090 cmd
= (struct wmi_peer_reorder_queue_setup_cmd
*)skb
->data
;
1091 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1092 WMI_TAG_REORDER_QUEUE_SETUP_CMD
) |
1093 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1095 ether_addr_copy(cmd
->peer_macaddr
.addr
, addr
);
1096 cmd
->vdev_id
= vdev_id
;
1098 cmd
->queue_ptr_lo
= lower_32_bits(paddr
);
1099 cmd
->queue_ptr_hi
= upper_32_bits(paddr
);
1100 cmd
->queue_no
= tid
;
1101 cmd
->ba_window_size_valid
= ba_window_size_valid
;
1102 cmd
->ba_window_size
= ba_window_size
;
1104 ret
= ath11k_wmi_cmd_send(ar
->wmi
, skb
,
1105 WMI_PEER_REORDER_QUEUE_SETUP_CMDID
);
1108 "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
1112 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1113 "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
1114 addr
, vdev_id
, tid
);
1120 ath11k_wmi_rx_reord_queue_remove(struct ath11k
*ar
,
1121 struct rx_reorder_queue_remove_params
*param
)
1123 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1124 struct wmi_peer_reorder_queue_remove_cmd
*cmd
;
1125 struct sk_buff
*skb
;
1128 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1132 cmd
= (struct wmi_peer_reorder_queue_remove_cmd
*)skb
->data
;
1133 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1134 WMI_TAG_REORDER_QUEUE_REMOVE_CMD
) |
1135 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1137 ether_addr_copy(cmd
->peer_macaddr
.addr
, param
->peer_macaddr
);
1138 cmd
->vdev_id
= param
->vdev_id
;
1139 cmd
->tid_mask
= param
->peer_tid_bitmap
;
1141 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1142 "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__
,
1143 param
->peer_macaddr
, param
->vdev_id
, param
->peer_tid_bitmap
);
1145 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
1146 WMI_PEER_REORDER_QUEUE_REMOVE_CMDID
);
1149 "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
1156 int ath11k_wmi_pdev_set_param(struct ath11k
*ar
, u32 param_id
,
1157 u32 param_value
, u8 pdev_id
)
1159 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1160 struct wmi_pdev_set_param_cmd
*cmd
;
1161 struct sk_buff
*skb
;
1164 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1168 cmd
= (struct wmi_pdev_set_param_cmd
*)skb
->data
;
1169 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PDEV_SET_PARAM_CMD
) |
1170 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1171 cmd
->pdev_id
= pdev_id
;
1172 cmd
->param_id
= param_id
;
1173 cmd
->param_value
= param_value
;
1175 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PDEV_SET_PARAM_CMDID
);
1177 ath11k_warn(ar
->ab
, "failed to send WMI_PDEV_SET_PARAM cmd\n");
1181 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1182 "WMI pdev set param %d pdev id %d value %d\n",
1183 param_id
, pdev_id
, param_value
);
1188 int ath11k_wmi_pdev_set_ps_mode(struct ath11k
*ar
, int vdev_id
, u32 enable
)
1190 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1191 struct wmi_pdev_set_ps_mode_cmd
*cmd
;
1192 struct sk_buff
*skb
;
1195 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1199 cmd
= (struct wmi_pdev_set_ps_mode_cmd
*)skb
->data
;
1200 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_STA_POWERSAVE_MODE_CMD
) |
1201 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1202 cmd
->vdev_id
= vdev_id
;
1203 cmd
->sta_ps_mode
= enable
;
1205 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_STA_POWERSAVE_MODE_CMDID
);
1207 ath11k_warn(ar
->ab
, "failed to send WMI_PDEV_SET_PARAM cmd\n");
1211 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1212 "WMI vdev set psmode %d vdev id %d\n",
1218 int ath11k_wmi_pdev_suspend(struct ath11k
*ar
, u32 suspend_opt
,
1221 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1222 struct wmi_pdev_suspend_cmd
*cmd
;
1223 struct sk_buff
*skb
;
1226 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1230 cmd
= (struct wmi_pdev_suspend_cmd
*)skb
->data
;
1232 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PDEV_SUSPEND_CMD
) |
1233 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1235 cmd
->suspend_opt
= suspend_opt
;
1236 cmd
->pdev_id
= pdev_id
;
1238 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PDEV_SUSPEND_CMDID
);
1240 ath11k_warn(ar
->ab
, "failed to send WMI_PDEV_SUSPEND cmd\n");
1244 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1245 "WMI pdev suspend pdev_id %d\n", pdev_id
);
1250 int ath11k_wmi_pdev_resume(struct ath11k
*ar
, u32 pdev_id
)
1252 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1253 struct wmi_pdev_resume_cmd
*cmd
;
1254 struct sk_buff
*skb
;
1257 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1261 cmd
= (struct wmi_pdev_resume_cmd
*)skb
->data
;
1263 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PDEV_RESUME_CMD
) |
1264 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1265 cmd
->pdev_id
= pdev_id
;
1267 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1268 "WMI pdev resume pdev id %d\n", pdev_id
);
1270 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PDEV_RESUME_CMDID
);
1272 ath11k_warn(ar
->ab
, "failed to send WMI_PDEV_RESUME cmd\n");
1279 /* TODO FW Support for the cmd is not available yet.
1280 * Can be tested once the command and corresponding
1281 * event is implemented in FW
1283 int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k
*ar
,
1284 enum wmi_bss_chan_info_req_type type
)
1286 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1287 struct wmi_pdev_bss_chan_info_req_cmd
*cmd
;
1288 struct sk_buff
*skb
;
1291 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1295 cmd
= (struct wmi_pdev_bss_chan_info_req_cmd
*)skb
->data
;
1297 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1298 WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST
) |
1299 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1300 cmd
->req_type
= type
;
1302 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1303 "WMI bss chan info req type %d\n", type
);
1305 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
1306 WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID
);
1309 "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
1316 int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k
*ar
, u8
*peer_addr
,
1317 struct ap_ps_params
*param
)
1319 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1320 struct wmi_ap_ps_peer_cmd
*cmd
;
1321 struct sk_buff
*skb
;
1324 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1328 cmd
= (struct wmi_ap_ps_peer_cmd
*)skb
->data
;
1329 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_AP_PS_PEER_CMD
) |
1330 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1332 cmd
->vdev_id
= param
->vdev_id
;
1333 ether_addr_copy(cmd
->peer_macaddr
.addr
, peer_addr
);
1334 cmd
->param
= param
->param
;
1335 cmd
->value
= param
->value
;
1337 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_AP_PS_PEER_PARAM_CMDID
);
1340 "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
1344 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1345 "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
1346 param
->vdev_id
, peer_addr
, param
->param
, param
->value
);
1351 int ath11k_wmi_set_sta_ps_param(struct ath11k
*ar
, u32 vdev_id
,
1352 u32 param
, u32 param_value
)
1354 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1355 struct wmi_sta_powersave_param_cmd
*cmd
;
1356 struct sk_buff
*skb
;
1359 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1363 cmd
= (struct wmi_sta_powersave_param_cmd
*)skb
->data
;
1364 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1365 WMI_TAG_STA_POWERSAVE_PARAM_CMD
) |
1366 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1368 cmd
->vdev_id
= vdev_id
;
1370 cmd
->value
= param_value
;
1372 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1373 "WMI set sta ps vdev_id %d param %d value %d\n",
1374 vdev_id
, param
, param_value
);
1376 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_STA_POWERSAVE_PARAM_CMDID
);
1378 ath11k_warn(ar
->ab
, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID");
1385 int ath11k_wmi_force_fw_hang_cmd(struct ath11k
*ar
, u32 type
, u32 delay_time_ms
)
1387 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1388 struct wmi_force_fw_hang_cmd
*cmd
;
1389 struct sk_buff
*skb
;
1394 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
1398 cmd
= (struct wmi_force_fw_hang_cmd
*)skb
->data
;
1399 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_FORCE_FW_HANG_CMD
) |
1400 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
1403 cmd
->delay_time_ms
= delay_time_ms
;
1405 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_FORCE_FW_HANG_CMDID
);
1408 ath11k_warn(ar
->ab
, "Failed to send WMI_FORCE_FW_HANG_CMDID");
1414 int ath11k_wmi_vdev_set_param_cmd(struct ath11k
*ar
, u32 vdev_id
,
1415 u32 param_id
, u32 param_value
)
1417 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1418 struct wmi_vdev_set_param_cmd
*cmd
;
1419 struct sk_buff
*skb
;
1422 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1426 cmd
= (struct wmi_vdev_set_param_cmd
*)skb
->data
;
1427 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_SET_PARAM_CMD
) |
1428 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1430 cmd
->vdev_id
= vdev_id
;
1431 cmd
->param_id
= param_id
;
1432 cmd
->param_value
= param_value
;
1434 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_VDEV_SET_PARAM_CMDID
);
1437 "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
1441 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1442 "WMI vdev id 0x%x set param %d value %d\n",
1443 vdev_id
, param_id
, param_value
);
1448 int ath11k_wmi_send_stats_request_cmd(struct ath11k
*ar
,
1449 struct stats_request_params
*param
)
1451 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1452 struct wmi_request_stats_cmd
*cmd
;
1453 struct sk_buff
*skb
;
1456 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1460 cmd
= (struct wmi_request_stats_cmd
*)skb
->data
;
1461 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_REQUEST_STATS_CMD
) |
1462 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1464 cmd
->stats_id
= param
->stats_id
;
1465 cmd
->vdev_id
= param
->vdev_id
;
1466 cmd
->pdev_id
= param
->pdev_id
;
1468 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_REQUEST_STATS_CMDID
);
1470 ath11k_warn(ar
->ab
, "failed to send WMI_REQUEST_STATS cmd\n");
1474 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1475 "WMI request stats 0x%x vdev id %d pdev id %d\n",
1476 param
->stats_id
, param
->vdev_id
, param
->pdev_id
);
1481 int ath11k_wmi_send_pdev_temperature_cmd(struct ath11k
*ar
)
1483 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1484 struct wmi_get_pdev_temperature_cmd
*cmd
;
1485 struct sk_buff
*skb
;
1488 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1492 cmd
= (struct wmi_get_pdev_temperature_cmd
*)skb
->data
;
1493 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PDEV_GET_TEMPERATURE_CMD
) |
1494 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1495 cmd
->pdev_id
= ar
->pdev
->pdev_id
;
1497 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PDEV_GET_TEMPERATURE_CMDID
);
1499 ath11k_warn(ar
->ab
, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
1503 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1504 "WMI pdev get temperature for pdev_id %d\n", ar
->pdev
->pdev_id
);
1509 int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k
*ar
,
1510 u32 vdev_id
, u32 bcn_ctrl_op
)
1512 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1513 struct wmi_bcn_offload_ctrl_cmd
*cmd
;
1514 struct sk_buff
*skb
;
1517 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1521 cmd
= (struct wmi_bcn_offload_ctrl_cmd
*)skb
->data
;
1522 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1523 WMI_TAG_BCN_OFFLOAD_CTRL_CMD
) |
1524 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1526 cmd
->vdev_id
= vdev_id
;
1527 cmd
->bcn_ctrl_op
= bcn_ctrl_op
;
1529 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1530 "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
1531 vdev_id
, bcn_ctrl_op
);
1533 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_BCN_OFFLOAD_CTRL_CMDID
);
1536 "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
1543 int ath11k_wmi_bcn_tmpl(struct ath11k
*ar
, u32 vdev_id
,
1544 struct ieee80211_mutable_offsets
*offs
,
1545 struct sk_buff
*bcn
)
1547 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1548 struct wmi_bcn_tmpl_cmd
*cmd
;
1549 struct wmi_bcn_prb_info
*bcn_prb_info
;
1550 struct wmi_tlv
*tlv
;
1551 struct sk_buff
*skb
;
1554 size_t aligned_len
= roundup(bcn
->len
, 4);
1556 len
= sizeof(*cmd
) + sizeof(*bcn_prb_info
) + TLV_HDR_SIZE
+ aligned_len
;
1558 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
1562 cmd
= (struct wmi_bcn_tmpl_cmd
*)skb
->data
;
1563 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_BCN_TMPL_CMD
) |
1564 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1565 cmd
->vdev_id
= vdev_id
;
1566 cmd
->tim_ie_offset
= offs
->tim_offset
;
1567 cmd
->csa_switch_count_offset
= offs
->csa_counter_offs
[0];
1568 cmd
->ext_csa_switch_count_offset
= offs
->csa_counter_offs
[1];
1569 cmd
->buf_len
= bcn
->len
;
1570 cmd
->mbssid_ie_offset
= offs
->multiple_bssid_offset
;
1572 ptr
= skb
->data
+ sizeof(*cmd
);
1575 len
= sizeof(*bcn_prb_info
);
1576 bcn_prb_info
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1577 WMI_TAG_BCN_PRB_INFO
) |
1578 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
1579 bcn_prb_info
->caps
= 0;
1580 bcn_prb_info
->erp
= 0;
1582 ptr
+= sizeof(*bcn_prb_info
);
1585 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_BYTE
) |
1586 FIELD_PREP(WMI_TLV_LEN
, aligned_len
);
1587 memcpy(tlv
->value
, bcn
->data
, bcn
->len
);
1589 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_BCN_TMPL_CMDID
);
1591 ath11k_warn(ar
->ab
, "failed to send WMI_BCN_TMPL_CMDID\n");
1598 int ath11k_wmi_vdev_install_key(struct ath11k
*ar
,
1599 struct wmi_vdev_install_key_arg
*arg
)
1601 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1602 struct wmi_vdev_install_key_cmd
*cmd
;
1603 struct wmi_tlv
*tlv
;
1604 struct sk_buff
*skb
;
1606 int key_len_aligned
= roundup(arg
->key_len
, sizeof(uint32_t));
1608 len
= sizeof(*cmd
) + TLV_HDR_SIZE
+ key_len_aligned
;
1610 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
1614 cmd
= (struct wmi_vdev_install_key_cmd
*)skb
->data
;
1615 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_INSTALL_KEY_CMD
) |
1616 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1617 cmd
->vdev_id
= arg
->vdev_id
;
1618 ether_addr_copy(cmd
->peer_macaddr
.addr
, arg
->macaddr
);
1619 cmd
->key_idx
= arg
->key_idx
;
1620 cmd
->key_flags
= arg
->key_flags
;
1621 cmd
->key_cipher
= arg
->key_cipher
;
1622 cmd
->key_len
= arg
->key_len
;
1623 cmd
->key_txmic_len
= arg
->key_txmic_len
;
1624 cmd
->key_rxmic_len
= arg
->key_rxmic_len
;
1626 if (arg
->key_rsc_counter
)
1627 memcpy(&cmd
->key_rsc_counter
, &arg
->key_rsc_counter
,
1628 sizeof(struct wmi_key_seq_counter
));
1630 tlv
= (struct wmi_tlv
*)(skb
->data
+ sizeof(*cmd
));
1631 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_BYTE
) |
1632 FIELD_PREP(WMI_TLV_LEN
, key_len_aligned
);
1633 memcpy(tlv
->value
, (u8
*)arg
->key_data
, key_len_aligned
);
1635 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_VDEV_INSTALL_KEY_CMDID
);
1638 "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
1642 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1643 "WMI vdev install key idx %d cipher %d len %d\n",
1644 arg
->key_idx
, arg
->key_cipher
, arg
->key_len
);
1650 ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd
*cmd
,
1651 struct peer_assoc_params
*param
)
1653 cmd
->peer_flags
= 0;
1655 if (param
->is_wme_set
) {
1656 if (param
->qos_flag
)
1657 cmd
->peer_flags
|= WMI_PEER_QOS
;
1658 if (param
->apsd_flag
)
1659 cmd
->peer_flags
|= WMI_PEER_APSD
;
1661 cmd
->peer_flags
|= WMI_PEER_HT
;
1663 cmd
->peer_flags
|= WMI_PEER_40MHZ
;
1665 cmd
->peer_flags
|= WMI_PEER_80MHZ
;
1667 cmd
->peer_flags
|= WMI_PEER_160MHZ
;
1669 /* Typically if STBC is enabled for VHT it should be enabled
1672 if (param
->stbc_flag
)
1673 cmd
->peer_flags
|= WMI_PEER_STBC
;
1675 /* Typically if LDPC is enabled for VHT it should be enabled
1678 if (param
->ldpc_flag
)
1679 cmd
->peer_flags
|= WMI_PEER_LDPC
;
1681 if (param
->static_mimops_flag
)
1682 cmd
->peer_flags
|= WMI_PEER_STATIC_MIMOPS
;
1683 if (param
->dynamic_mimops_flag
)
1684 cmd
->peer_flags
|= WMI_PEER_DYN_MIMOPS
;
1685 if (param
->spatial_mux_flag
)
1686 cmd
->peer_flags
|= WMI_PEER_SPATIAL_MUX
;
1687 if (param
->vht_flag
)
1688 cmd
->peer_flags
|= WMI_PEER_VHT
;
1690 cmd
->peer_flags
|= WMI_PEER_HE
;
1691 if (param
->twt_requester
)
1692 cmd
->peer_flags
|= WMI_PEER_TWT_REQ
;
1693 if (param
->twt_responder
)
1694 cmd
->peer_flags
|= WMI_PEER_TWT_RESP
;
1697 /* Suppress authorization for all AUTH modes that need 4-way handshake
1698 * (during re-association).
1699 * Authorization will be done for these modes on key installation.
1701 if (param
->auth_flag
)
1702 cmd
->peer_flags
|= WMI_PEER_AUTH
;
1703 if (param
->need_ptk_4_way
)
1704 cmd
->peer_flags
|= WMI_PEER_NEED_PTK_4_WAY
;
1706 cmd
->peer_flags
&= ~WMI_PEER_NEED_PTK_4_WAY
;
1707 if (param
->need_gtk_2_way
)
1708 cmd
->peer_flags
|= WMI_PEER_NEED_GTK_2_WAY
;
1709 /* safe mode bypass the 4-way handshake */
1710 if (param
->safe_mode_enabled
)
1711 cmd
->peer_flags
&= ~(WMI_PEER_NEED_PTK_4_WAY
|
1712 WMI_PEER_NEED_GTK_2_WAY
);
1714 if (param
->is_pmf_enabled
)
1715 cmd
->peer_flags
|= WMI_PEER_PMF
;
1717 /* Disable AMSDU for station transmit, if user configures it */
1718 /* Disable AMSDU for AP transmit to 11n Stations, if user configures
1720 * if (param->amsdu_disable) Add after FW support
1723 /* Target asserts if node is marked HT and all MCS is set to 0.
1724 * Mark the node as non-HT if all the mcs rates are disabled through
1727 if (param
->peer_ht_rates
.num_rates
== 0)
1728 cmd
->peer_flags
&= ~WMI_PEER_HT
;
1731 int ath11k_wmi_send_peer_assoc_cmd(struct ath11k
*ar
,
1732 struct peer_assoc_params
*param
)
1734 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1735 struct wmi_peer_assoc_complete_cmd
*cmd
;
1736 struct wmi_vht_rate_set
*mcs
;
1737 struct wmi_he_rate_set
*he_mcs
;
1738 struct sk_buff
*skb
;
1739 struct wmi_tlv
*tlv
;
1741 u32 peer_legacy_rates_align
;
1742 u32 peer_ht_rates_align
;
1745 peer_legacy_rates_align
= roundup(param
->peer_legacy_rates
.num_rates
,
1747 peer_ht_rates_align
= roundup(param
->peer_ht_rates
.num_rates
,
1750 len
= sizeof(*cmd
) +
1751 TLV_HDR_SIZE
+ (peer_legacy_rates_align
* sizeof(u8
)) +
1752 TLV_HDR_SIZE
+ (peer_ht_rates_align
* sizeof(u8
)) +
1753 sizeof(*mcs
) + TLV_HDR_SIZE
+
1754 (sizeof(*he_mcs
) * param
->peer_he_mcs_count
);
1756 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
1763 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1764 WMI_TAG_PEER_ASSOC_COMPLETE_CMD
) |
1765 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1767 cmd
->vdev_id
= param
->vdev_id
;
1769 cmd
->peer_new_assoc
= param
->peer_new_assoc
;
1770 cmd
->peer_associd
= param
->peer_associd
;
1772 ath11k_wmi_copy_peer_flags(cmd
, param
);
1774 ether_addr_copy(cmd
->peer_macaddr
.addr
, param
->peer_mac
);
1776 cmd
->peer_rate_caps
= param
->peer_rate_caps
;
1777 cmd
->peer_caps
= param
->peer_caps
;
1778 cmd
->peer_listen_intval
= param
->peer_listen_intval
;
1779 cmd
->peer_ht_caps
= param
->peer_ht_caps
;
1780 cmd
->peer_max_mpdu
= param
->peer_max_mpdu
;
1781 cmd
->peer_mpdu_density
= param
->peer_mpdu_density
;
1782 cmd
->peer_vht_caps
= param
->peer_vht_caps
;
1783 cmd
->peer_phymode
= param
->peer_phymode
;
1785 /* Update 11ax capabilities */
1786 cmd
->peer_he_cap_info
= param
->peer_he_cap_macinfo
[0];
1787 cmd
->peer_he_cap_info_ext
= param
->peer_he_cap_macinfo
[1];
1788 cmd
->peer_he_cap_info_internal
= param
->peer_he_cap_macinfo_internal
;
1789 cmd
->peer_he_ops
= param
->peer_he_ops
;
1790 memcpy(&cmd
->peer_he_cap_phy
, ¶m
->peer_he_cap_phyinfo
,
1791 sizeof(param
->peer_he_cap_phyinfo
));
1792 memcpy(&cmd
->peer_ppet
, ¶m
->peer_ppet
,
1793 sizeof(param
->peer_ppet
));
1795 /* Update peer legacy rate information */
1796 ptr
+= sizeof(*cmd
);
1799 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_BYTE
) |
1800 FIELD_PREP(WMI_TLV_LEN
, peer_legacy_rates_align
);
1802 ptr
+= TLV_HDR_SIZE
;
1804 cmd
->num_peer_legacy_rates
= param
->peer_legacy_rates
.num_rates
;
1805 memcpy(ptr
, param
->peer_legacy_rates
.rates
,
1806 param
->peer_legacy_rates
.num_rates
);
1808 /* Update peer HT rate information */
1809 ptr
+= peer_legacy_rates_align
;
1812 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_BYTE
) |
1813 FIELD_PREP(WMI_TLV_LEN
, peer_ht_rates_align
);
1814 ptr
+= TLV_HDR_SIZE
;
1815 cmd
->num_peer_ht_rates
= param
->peer_ht_rates
.num_rates
;
1816 memcpy(ptr
, param
->peer_ht_rates
.rates
,
1817 param
->peer_ht_rates
.num_rates
);
1820 ptr
+= peer_ht_rates_align
;
1824 mcs
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VHT_RATE_SET
) |
1825 FIELD_PREP(WMI_TLV_LEN
, sizeof(*mcs
) - TLV_HDR_SIZE
);
1827 cmd
->peer_nss
= param
->peer_nss
;
1829 /* Update bandwidth-NSS mapping */
1830 cmd
->peer_bw_rxnss_override
= 0;
1831 cmd
->peer_bw_rxnss_override
|= param
->peer_bw_rxnss_override
;
1833 if (param
->vht_capable
) {
1834 mcs
->rx_max_rate
= param
->rx_max_rate
;
1835 mcs
->rx_mcs_set
= param
->rx_mcs_set
;
1836 mcs
->tx_max_rate
= param
->tx_max_rate
;
1837 mcs
->tx_mcs_set
= param
->tx_mcs_set
;
1841 cmd
->peer_he_mcs
= param
->peer_he_mcs_count
;
1843 ptr
+= sizeof(*mcs
);
1845 len
= param
->peer_he_mcs_count
* sizeof(*he_mcs
);
1848 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_STRUCT
) |
1849 FIELD_PREP(WMI_TLV_LEN
, len
);
1850 ptr
+= TLV_HDR_SIZE
;
1852 /* Loop through the HE rate set */
1853 for (i
= 0; i
< param
->peer_he_mcs_count
; i
++) {
1855 he_mcs
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1856 WMI_TAG_HE_RATE_SET
) |
1857 FIELD_PREP(WMI_TLV_LEN
,
1858 sizeof(*he_mcs
) - TLV_HDR_SIZE
);
1860 he_mcs
->rx_mcs_set
= param
->peer_he_rx_mcs_set
[i
];
1861 he_mcs
->tx_mcs_set
= param
->peer_he_tx_mcs_set
[i
];
1862 ptr
+= sizeof(*he_mcs
);
1865 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PEER_ASSOC_CMDID
);
1868 "failed to send WMI_PEER_ASSOC_CMDID\n");
1872 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1873 "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n",
1874 cmd
->vdev_id
, cmd
->peer_associd
, param
->peer_mac
,
1875 cmd
->peer_flags
, cmd
->peer_rate_caps
, cmd
->peer_caps
,
1876 cmd
->peer_listen_intval
, cmd
->peer_ht_caps
,
1877 cmd
->peer_max_mpdu
, cmd
->peer_nss
, cmd
->peer_phymode
,
1878 cmd
->peer_mpdu_density
,
1879 cmd
->peer_vht_caps
, cmd
->peer_he_cap_info
,
1880 cmd
->peer_he_ops
, cmd
->peer_he_cap_info_ext
,
1881 cmd
->peer_he_cap_phy
[0], cmd
->peer_he_cap_phy
[1],
1882 cmd
->peer_he_cap_phy
[2],
1883 cmd
->peer_bw_rxnss_override
);
1888 void ath11k_wmi_start_scan_init(struct ath11k
*ar
,
1889 struct scan_req_params
*arg
)
1891 /* setup commonly used values */
1892 arg
->scan_req_id
= 1;
1893 arg
->scan_priority
= WMI_SCAN_PRIORITY_LOW
;
1894 arg
->dwell_time_active
= 50;
1895 arg
->dwell_time_active_2g
= 0;
1896 arg
->dwell_time_passive
= 150;
1897 arg
->min_rest_time
= 50;
1898 arg
->max_rest_time
= 500;
1899 arg
->repeat_probe_time
= 0;
1900 arg
->probe_spacing_time
= 0;
1902 arg
->max_scan_time
= 20000;
1903 arg
->probe_delay
= 5;
1904 arg
->notify_scan_events
= WMI_SCAN_EVENT_STARTED
|
1905 WMI_SCAN_EVENT_COMPLETED
|
1906 WMI_SCAN_EVENT_BSS_CHANNEL
|
1907 WMI_SCAN_EVENT_FOREIGN_CHAN
|
1908 WMI_SCAN_EVENT_DEQUEUED
;
1909 arg
->scan_flags
|= WMI_SCAN_CHAN_STAT_EVENT
;
1914 ath11k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd
*cmd
,
1915 struct scan_req_params
*param
)
1917 /* Scan events subscription */
1918 if (param
->scan_ev_started
)
1919 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_STARTED
;
1920 if (param
->scan_ev_completed
)
1921 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_COMPLETED
;
1922 if (param
->scan_ev_bss_chan
)
1923 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_BSS_CHANNEL
;
1924 if (param
->scan_ev_foreign_chan
)
1925 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_FOREIGN_CHAN
;
1926 if (param
->scan_ev_dequeued
)
1927 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_DEQUEUED
;
1928 if (param
->scan_ev_preempted
)
1929 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_PREEMPTED
;
1930 if (param
->scan_ev_start_failed
)
1931 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_START_FAILED
;
1932 if (param
->scan_ev_restarted
)
1933 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_RESTARTED
;
1934 if (param
->scan_ev_foreign_chn_exit
)
1935 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT
;
1936 if (param
->scan_ev_suspended
)
1937 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_SUSPENDED
;
1938 if (param
->scan_ev_resumed
)
1939 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_RESUMED
;
1941 /** Set scan control flags */
1942 cmd
->scan_ctrl_flags
= 0;
1943 if (param
->scan_f_passive
)
1944 cmd
->scan_ctrl_flags
|= WMI_SCAN_FLAG_PASSIVE
;
1945 if (param
->scan_f_strict_passive_pch
)
1946 cmd
->scan_ctrl_flags
|= WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN
;
1947 if (param
->scan_f_promisc_mode
)
1948 cmd
->scan_ctrl_flags
|= WMI_SCAN_FILTER_PROMISCUOS
;
1949 if (param
->scan_f_capture_phy_err
)
1950 cmd
->scan_ctrl_flags
|= WMI_SCAN_CAPTURE_PHY_ERROR
;
1951 if (param
->scan_f_half_rate
)
1952 cmd
->scan_ctrl_flags
|= WMI_SCAN_FLAG_HALF_RATE_SUPPORT
;
1953 if (param
->scan_f_quarter_rate
)
1954 cmd
->scan_ctrl_flags
|= WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT
;
1955 if (param
->scan_f_cck_rates
)
1956 cmd
->scan_ctrl_flags
|= WMI_SCAN_ADD_CCK_RATES
;
1957 if (param
->scan_f_ofdm_rates
)
1958 cmd
->scan_ctrl_flags
|= WMI_SCAN_ADD_OFDM_RATES
;
1959 if (param
->scan_f_chan_stat_evnt
)
1960 cmd
->scan_ctrl_flags
|= WMI_SCAN_CHAN_STAT_EVENT
;
1961 if (param
->scan_f_filter_prb_req
)
1962 cmd
->scan_ctrl_flags
|= WMI_SCAN_FILTER_PROBE_REQ
;
1963 if (param
->scan_f_bcast_probe
)
1964 cmd
->scan_ctrl_flags
|= WMI_SCAN_ADD_BCAST_PROBE_REQ
;
1965 if (param
->scan_f_offchan_mgmt_tx
)
1966 cmd
->scan_ctrl_flags
|= WMI_SCAN_OFFCHAN_MGMT_TX
;
1967 if (param
->scan_f_offchan_data_tx
)
1968 cmd
->scan_ctrl_flags
|= WMI_SCAN_OFFCHAN_DATA_TX
;
1969 if (param
->scan_f_force_active_dfs_chn
)
1970 cmd
->scan_ctrl_flags
|= WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS
;
1971 if (param
->scan_f_add_tpc_ie_in_probe
)
1972 cmd
->scan_ctrl_flags
|= WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ
;
1973 if (param
->scan_f_add_ds_ie_in_probe
)
1974 cmd
->scan_ctrl_flags
|= WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ
;
1975 if (param
->scan_f_add_spoofed_mac_in_probe
)
1976 cmd
->scan_ctrl_flags
|= WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ
;
1977 if (param
->scan_f_add_rand_seq_in_probe
)
1978 cmd
->scan_ctrl_flags
|= WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ
;
1979 if (param
->scan_f_en_ie_whitelist_in_probe
)
1980 cmd
->scan_ctrl_flags
|=
1981 WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ
;
1983 /* for adaptive scan mode using 3 bits (21 - 23 bits) */
1984 WMI_SCAN_SET_DWELL_MODE(cmd
->scan_ctrl_flags
,
1985 param
->adaptive_dwell_time_mode
);
1988 int ath11k_wmi_send_scan_start_cmd(struct ath11k
*ar
,
1989 struct scan_req_params
*params
)
1991 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1992 struct wmi_start_scan_cmd
*cmd
;
1993 struct wmi_ssid
*ssid
= NULL
;
1994 struct wmi_mac_addr
*bssid
;
1995 struct sk_buff
*skb
;
1996 struct wmi_tlv
*tlv
;
2000 u8 extraie_len_with_pad
= 0;
2004 len
+= TLV_HDR_SIZE
;
2005 if (params
->num_chan
)
2006 len
+= params
->num_chan
* sizeof(u32
);
2008 len
+= TLV_HDR_SIZE
;
2009 if (params
->num_ssids
)
2010 len
+= params
->num_ssids
* sizeof(*ssid
);
2012 len
+= TLV_HDR_SIZE
;
2013 if (params
->num_bssid
)
2014 len
+= sizeof(*bssid
) * params
->num_bssid
;
2016 len
+= TLV_HDR_SIZE
;
2017 if (params
->extraie
.len
)
2018 extraie_len_with_pad
=
2019 roundup(params
->extraie
.len
, sizeof(u32
));
2020 len
+= extraie_len_with_pad
;
2022 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
2029 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_START_SCAN_CMD
) |
2030 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2032 cmd
->scan_id
= params
->scan_id
;
2033 cmd
->scan_req_id
= params
->scan_req_id
;
2034 cmd
->vdev_id
= params
->vdev_id
;
2035 cmd
->scan_priority
= params
->scan_priority
;
2036 cmd
->notify_scan_events
= params
->notify_scan_events
;
2038 ath11k_wmi_copy_scan_event_cntrl_flags(cmd
, params
);
2040 cmd
->dwell_time_active
= params
->dwell_time_active
;
2041 cmd
->dwell_time_active_2g
= params
->dwell_time_active_2g
;
2042 cmd
->dwell_time_passive
= params
->dwell_time_passive
;
2043 cmd
->min_rest_time
= params
->min_rest_time
;
2044 cmd
->max_rest_time
= params
->max_rest_time
;
2045 cmd
->repeat_probe_time
= params
->repeat_probe_time
;
2046 cmd
->probe_spacing_time
= params
->probe_spacing_time
;
2047 cmd
->idle_time
= params
->idle_time
;
2048 cmd
->max_scan_time
= params
->max_scan_time
;
2049 cmd
->probe_delay
= params
->probe_delay
;
2050 cmd
->burst_duration
= params
->burst_duration
;
2051 cmd
->num_chan
= params
->num_chan
;
2052 cmd
->num_bssid
= params
->num_bssid
;
2053 cmd
->num_ssids
= params
->num_ssids
;
2054 cmd
->ie_len
= params
->extraie
.len
;
2055 cmd
->n_probes
= params
->n_probes
;
2057 ptr
+= sizeof(*cmd
);
2059 len
= params
->num_chan
* sizeof(u32
);
2062 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_UINT32
) |
2063 FIELD_PREP(WMI_TLV_LEN
, len
);
2064 ptr
+= TLV_HDR_SIZE
;
2065 tmp_ptr
= (u32
*)ptr
;
2067 for (i
= 0; i
< params
->num_chan
; ++i
)
2068 tmp_ptr
[i
] = params
->chan_list
[i
];
2072 len
= params
->num_ssids
* sizeof(*ssid
);
2074 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_FIXED_STRUCT
) |
2075 FIELD_PREP(WMI_TLV_LEN
, len
);
2077 ptr
+= TLV_HDR_SIZE
;
2079 if (params
->num_ssids
) {
2081 for (i
= 0; i
< params
->num_ssids
; ++i
) {
2082 ssid
->ssid_len
= params
->ssid
[i
].length
;
2083 memcpy(ssid
->ssid
, params
->ssid
[i
].ssid
,
2084 params
->ssid
[i
].length
);
2089 ptr
+= (params
->num_ssids
* sizeof(*ssid
));
2090 len
= params
->num_bssid
* sizeof(*bssid
);
2092 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_FIXED_STRUCT
) |
2093 FIELD_PREP(WMI_TLV_LEN
, len
);
2095 ptr
+= TLV_HDR_SIZE
;
2098 if (params
->num_bssid
) {
2099 for (i
= 0; i
< params
->num_bssid
; ++i
) {
2100 ether_addr_copy(bssid
->addr
,
2101 params
->bssid_list
[i
].addr
);
2106 ptr
+= params
->num_bssid
* sizeof(*bssid
);
2108 len
= extraie_len_with_pad
;
2110 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_BYTE
) |
2111 FIELD_PREP(WMI_TLV_LEN
, len
);
2112 ptr
+= TLV_HDR_SIZE
;
2114 if (params
->extraie
.len
)
2115 memcpy(ptr
, params
->extraie
.ptr
,
2116 params
->extraie
.len
);
2118 ptr
+= extraie_len_with_pad
;
2120 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2121 WMI_START_SCAN_CMDID
);
2123 ath11k_warn(ar
->ab
, "failed to send WMI_START_SCAN_CMDID\n");
2130 int ath11k_wmi_send_scan_stop_cmd(struct ath11k
*ar
,
2131 struct scan_cancel_param
*param
)
2133 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2134 struct wmi_stop_scan_cmd
*cmd
;
2135 struct sk_buff
*skb
;
2138 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
2142 cmd
= (struct wmi_stop_scan_cmd
*)skb
->data
;
2144 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_STOP_SCAN_CMD
) |
2145 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2147 cmd
->vdev_id
= param
->vdev_id
;
2148 cmd
->requestor
= param
->requester
;
2149 cmd
->scan_id
= param
->scan_id
;
2150 cmd
->pdev_id
= param
->pdev_id
;
2151 /* stop the scan with the corresponding scan_id */
2152 if (param
->req_type
== WLAN_SCAN_CANCEL_PDEV_ALL
) {
2153 /* Cancelling all scans */
2154 cmd
->req_type
= WMI_SCAN_STOP_ALL
;
2155 } else if (param
->req_type
== WLAN_SCAN_CANCEL_VDEV_ALL
) {
2156 /* Cancelling VAP scans */
2157 cmd
->req_type
= WMI_SCN_STOP_VAP_ALL
;
2158 } else if (param
->req_type
== WLAN_SCAN_CANCEL_SINGLE
) {
2159 /* Cancelling specific scan */
2160 cmd
->req_type
= WMI_SCAN_STOP_ONE
;
2162 ath11k_warn(ar
->ab
, "invalid scan cancel param %d",
2168 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2169 WMI_STOP_SCAN_CMDID
);
2171 ath11k_warn(ar
->ab
, "failed to send WMI_STOP_SCAN_CMDID\n");
2178 int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k
*ar
,
2179 struct scan_chan_list_params
*chan_list
)
2181 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2182 struct wmi_scan_chan_list_cmd
*cmd
;
2183 struct sk_buff
*skb
;
2184 struct wmi_channel
*chan_info
;
2185 struct channel_param
*tchan_info
;
2186 struct wmi_tlv
*tlv
;
2191 len
= sizeof(*cmd
) + TLV_HDR_SIZE
+
2192 sizeof(*chan_info
) * chan_list
->nallchans
;
2194 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
2198 cmd
= (struct wmi_scan_chan_list_cmd
*)skb
->data
;
2199 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_SCAN_CHAN_LIST_CMD
) |
2200 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2202 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
2203 "WMI no.of chan = %d len = %d\n", chan_list
->nallchans
, len
);
2204 cmd
->pdev_id
= chan_list
->pdev_id
;
2205 cmd
->num_scan_chans
= chan_list
->nallchans
;
2207 ptr
= skb
->data
+ sizeof(*cmd
);
2209 len
= sizeof(*chan_info
) * chan_list
->nallchans
;
2211 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_STRUCT
) |
2212 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
2213 ptr
+= TLV_HDR_SIZE
;
2215 tchan_info
= &chan_list
->ch_param
[0];
2217 for (i
= 0; i
< chan_list
->nallchans
; ++i
) {
2219 memset(chan_info
, 0, sizeof(*chan_info
));
2220 len
= sizeof(*chan_info
);
2221 chan_info
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
2223 FIELD_PREP(WMI_TLV_LEN
,
2224 len
- TLV_HDR_SIZE
);
2226 reg1
= &chan_info
->reg_info_1
;
2227 reg2
= &chan_info
->reg_info_2
;
2228 chan_info
->mhz
= tchan_info
->mhz
;
2229 chan_info
->band_center_freq1
= tchan_info
->cfreq1
;
2230 chan_info
->band_center_freq2
= tchan_info
->cfreq2
;
2232 if (tchan_info
->is_chan_passive
)
2233 chan_info
->info
|= WMI_CHAN_INFO_PASSIVE
;
2234 if (tchan_info
->allow_he
)
2235 chan_info
->info
|= WMI_CHAN_INFO_ALLOW_HE
;
2236 else if (tchan_info
->allow_vht
)
2237 chan_info
->info
|= WMI_CHAN_INFO_ALLOW_VHT
;
2238 else if (tchan_info
->allow_ht
)
2239 chan_info
->info
|= WMI_CHAN_INFO_ALLOW_HT
;
2240 if (tchan_info
->half_rate
)
2241 chan_info
->info
|= WMI_CHAN_INFO_HALF_RATE
;
2242 if (tchan_info
->quarter_rate
)
2243 chan_info
->info
|= WMI_CHAN_INFO_QUARTER_RATE
;
2245 chan_info
->info
|= FIELD_PREP(WMI_CHAN_INFO_MODE
,
2246 tchan_info
->phy_mode
);
2247 *reg1
|= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR
,
2248 tchan_info
->minpower
);
2249 *reg1
|= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR
,
2250 tchan_info
->maxpower
);
2251 *reg1
|= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR
,
2252 tchan_info
->maxregpower
);
2253 *reg1
|= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS
,
2254 tchan_info
->reg_class_id
);
2255 *reg2
|= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX
,
2256 tchan_info
->antennamax
);
2258 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
2259 "WMI chan scan list chan[%d] = %u\n",
2262 ptr
+= sizeof(*chan_info
);
2267 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_SCAN_CHAN_LIST_CMDID
);
2269 ath11k_warn(ar
->ab
, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
2276 int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k
*ar
, u32 vdev_id
,
2277 struct wmi_wmm_params_all_arg
*param
)
2279 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2280 struct wmi_vdev_set_wmm_params_cmd
*cmd
;
2281 struct wmi_wmm_params
*wmm_param
;
2282 struct wmi_wmm_params_arg
*wmi_wmm_arg
;
2283 struct sk_buff
*skb
;
2286 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
2290 cmd
= (struct wmi_vdev_set_wmm_params_cmd
*)skb
->data
;
2291 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
2292 WMI_TAG_VDEV_SET_WMM_PARAMS_CMD
) |
2293 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2295 cmd
->vdev_id
= vdev_id
;
2296 cmd
->wmm_param_type
= 0;
2298 for (ac
= 0; ac
< WME_NUM_AC
; ac
++) {
2301 wmi_wmm_arg
= ¶m
->ac_be
;
2304 wmi_wmm_arg
= ¶m
->ac_bk
;
2307 wmi_wmm_arg
= ¶m
->ac_vi
;
2310 wmi_wmm_arg
= ¶m
->ac_vo
;
2314 wmm_param
= (struct wmi_wmm_params
*)&cmd
->wmm_params
[ac
];
2315 wmm_param
->tlv_header
=
2316 FIELD_PREP(WMI_TLV_TAG
,
2317 WMI_TAG_VDEV_SET_WMM_PARAMS_CMD
) |
2318 FIELD_PREP(WMI_TLV_LEN
,
2319 sizeof(*wmm_param
) - TLV_HDR_SIZE
);
2321 wmm_param
->aifs
= wmi_wmm_arg
->aifs
;
2322 wmm_param
->cwmin
= wmi_wmm_arg
->cwmin
;
2323 wmm_param
->cwmax
= wmi_wmm_arg
->cwmax
;
2324 wmm_param
->txoplimit
= wmi_wmm_arg
->txop
;
2325 wmm_param
->acm
= wmi_wmm_arg
->acm
;
2326 wmm_param
->no_ack
= wmi_wmm_arg
->no_ack
;
2328 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
2329 "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
2330 ac
, wmm_param
->aifs
, wmm_param
->cwmin
,
2331 wmm_param
->cwmax
, wmm_param
->txoplimit
,
2332 wmm_param
->acm
, wmm_param
->no_ack
);
2334 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2335 WMI_VDEV_SET_WMM_PARAMS_CMDID
);
2338 "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID");
2345 int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k
*ar
,
2348 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2349 struct wmi_dfs_phyerr_offload_cmd
*cmd
;
2350 struct sk_buff
*skb
;
2353 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
2357 cmd
= (struct wmi_dfs_phyerr_offload_cmd
*)skb
->data
;
2359 FIELD_PREP(WMI_TLV_TAG
,
2360 WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD
) |
2361 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2363 cmd
->pdev_id
= pdev_id
;
2365 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
2366 "WMI dfs phy err offload enable pdev id %d\n", pdev_id
);
2368 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2369 WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID
);
2372 "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
2379 int ath11k_wmi_delba_send(struct ath11k
*ar
, u32 vdev_id
, const u8
*mac
,
2380 u32 tid
, u32 initiator
, u32 reason
)
2382 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2383 struct wmi_delba_send_cmd
*cmd
;
2384 struct sk_buff
*skb
;
2387 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
2391 cmd
= (struct wmi_delba_send_cmd
*)skb
->data
;
2392 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_DELBA_SEND_CMD
) |
2393 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2394 cmd
->vdev_id
= vdev_id
;
2395 ether_addr_copy(cmd
->peer_macaddr
.addr
, mac
);
2397 cmd
->initiator
= initiator
;
2398 cmd
->reasoncode
= reason
;
2400 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
2401 "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
2402 vdev_id
, mac
, tid
, initiator
, reason
);
2404 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_DELBA_SEND_CMDID
);
2408 "failed to send WMI_DELBA_SEND_CMDID cmd\n");
2415 int ath11k_wmi_addba_set_resp(struct ath11k
*ar
, u32 vdev_id
, const u8
*mac
,
2416 u32 tid
, u32 status
)
2418 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2419 struct wmi_addba_setresponse_cmd
*cmd
;
2420 struct sk_buff
*skb
;
2423 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
2427 cmd
= (struct wmi_addba_setresponse_cmd
*)skb
->data
;
2429 FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ADDBA_SETRESPONSE_CMD
) |
2430 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2431 cmd
->vdev_id
= vdev_id
;
2432 ether_addr_copy(cmd
->peer_macaddr
.addr
, mac
);
2434 cmd
->statuscode
= status
;
2436 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
2437 "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
2438 vdev_id
, mac
, tid
, status
);
2440 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_ADDBA_SET_RESP_CMDID
);
2444 "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
2451 int ath11k_wmi_addba_send(struct ath11k
*ar
, u32 vdev_id
, const u8
*mac
,
2452 u32 tid
, u32 buf_size
)
2454 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2455 struct wmi_addba_send_cmd
*cmd
;
2456 struct sk_buff
*skb
;
2459 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
2463 cmd
= (struct wmi_addba_send_cmd
*)skb
->data
;
2464 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ADDBA_SEND_CMD
) |
2465 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2466 cmd
->vdev_id
= vdev_id
;
2467 ether_addr_copy(cmd
->peer_macaddr
.addr
, mac
);
2469 cmd
->buffersize
= buf_size
;
2471 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
2472 "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
2473 vdev_id
, mac
, tid
, buf_size
);
2475 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_ADDBA_SEND_CMDID
);
2479 "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
2486 int ath11k_wmi_addba_clear_resp(struct ath11k
*ar
, u32 vdev_id
, const u8
*mac
)
2488 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2489 struct wmi_addba_clear_resp_cmd
*cmd
;
2490 struct sk_buff
*skb
;
2493 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
2497 cmd
= (struct wmi_addba_clear_resp_cmd
*)skb
->data
;
2499 FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ADDBA_CLEAR_RESP_CMD
) |
2500 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2501 cmd
->vdev_id
= vdev_id
;
2502 ether_addr_copy(cmd
->peer_macaddr
.addr
, mac
);
2504 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
2505 "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
2508 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_ADDBA_CLEAR_RESP_CMDID
);
2512 "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
2519 int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k
*ar
, u8
*addr
, u8 enable
)
2521 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2522 struct wmi_pdev_pktlog_filter_cmd
*cmd
;
2523 struct wmi_pdev_pktlog_filter_info
*info
;
2524 struct sk_buff
*skb
;
2525 struct wmi_tlv
*tlv
;
2529 len
= sizeof(*cmd
) + sizeof(*info
) + TLV_HDR_SIZE
;
2530 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
2534 cmd
= (struct wmi_pdev_pktlog_filter_cmd
*)skb
->data
;
2536 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD
) |
2537 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2539 cmd
->pdev_id
= DP_HW2SW_MACID(ar
->pdev
->pdev_id
);
2541 cmd
->enable
= enable
;
2543 ptr
= skb
->data
+ sizeof(*cmd
);
2546 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_STRUCT
) |
2547 FIELD_PREP(WMI_TLV_LEN
, sizeof(*info
));
2549 ptr
+= TLV_HDR_SIZE
;
2552 ether_addr_copy(info
->peer_macaddr
.addr
, addr
);
2553 info
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO
) |
2554 FIELD_PREP(WMI_TLV_LEN
,
2555 sizeof(*info
) - TLV_HDR_SIZE
);
2557 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2558 WMI_PDEV_PKTLOG_FILTER_CMDID
);
2560 ath11k_warn(ar
->ab
, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
2568 ath11k_wmi_send_init_country_cmd(struct ath11k
*ar
,
2569 struct wmi_init_country_params init_cc_params
)
2571 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2572 struct wmi_init_country_cmd
*cmd
;
2573 struct sk_buff
*skb
;
2576 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
2580 cmd
= (struct wmi_init_country_cmd
*)skb
->data
;
2582 FIELD_PREP(WMI_TLV_TAG
,
2583 WMI_TAG_SET_INIT_COUNTRY_CMD
) |
2584 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2586 cmd
->pdev_id
= ar
->pdev
->pdev_id
;
2588 switch (init_cc_params
.flags
) {
2590 cmd
->init_cc_type
= WMI_COUNTRY_INFO_TYPE_ALPHA
;
2591 memcpy((u8
*)&cmd
->cc_info
.alpha2
,
2592 init_cc_params
.cc_info
.alpha2
, 3);
2595 cmd
->init_cc_type
= WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE
;
2596 cmd
->cc_info
.country_code
= init_cc_params
.cc_info
.country_code
;
2599 cmd
->init_cc_type
= WMI_COUNTRY_INFO_TYPE_REGDOMAIN
;
2600 cmd
->cc_info
.regdom_id
= init_cc_params
.cc_info
.regdom_id
;
2607 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2608 WMI_SET_INIT_COUNTRY_CMDID
);
2613 "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n",
2622 ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k
*ar
,
2623 struct thermal_mitigation_params
*param
)
2625 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2626 struct wmi_therm_throt_config_request_cmd
*cmd
;
2627 struct wmi_therm_throt_level_config_info
*lvl_conf
;
2628 struct wmi_tlv
*tlv
;
2629 struct sk_buff
*skb
;
2632 len
= sizeof(*cmd
) + TLV_HDR_SIZE
+
2633 THERMAL_LEVELS
* sizeof(struct wmi_therm_throt_level_config_info
);
2635 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
2639 cmd
= (struct wmi_therm_throt_config_request_cmd
*)skb
->data
;
2641 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_THERM_THROT_CONFIG_REQUEST
) |
2642 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2644 cmd
->pdev_id
= ar
->pdev
->pdev_id
;
2645 cmd
->enable
= param
->enable
;
2646 cmd
->dc
= param
->dc
;
2647 cmd
->dc_per_event
= param
->dc_per_event
;
2648 cmd
->therm_throt_levels
= THERMAL_LEVELS
;
2650 tlv
= (struct wmi_tlv
*)(skb
->data
+ sizeof(*cmd
));
2651 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_STRUCT
) |
2652 FIELD_PREP(WMI_TLV_LEN
,
2654 sizeof(struct wmi_therm_throt_level_config_info
)));
2656 lvl_conf
= (struct wmi_therm_throt_level_config_info
*)(skb
->data
+
2659 for (i
= 0; i
< THERMAL_LEVELS
; i
++) {
2660 lvl_conf
->tlv_header
=
2661 FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO
) |
2662 FIELD_PREP(WMI_TLV_LEN
, sizeof(*lvl_conf
) - TLV_HDR_SIZE
);
2664 lvl_conf
->temp_lwm
= param
->levelconf
[i
].tmplwm
;
2665 lvl_conf
->temp_hwm
= param
->levelconf
[i
].tmphwm
;
2666 lvl_conf
->dc_off_percent
= param
->levelconf
[i
].dcoffpercent
;
2667 lvl_conf
->prio
= param
->levelconf
[i
].priority
;
2671 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_THERM_THROT_SET_CONF_CMDID
);
2673 ath11k_warn(ar
->ab
, "failed to send THERM_THROT_SET_CONF cmd\n");
2677 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
2678 "WMI vdev set thermal throt pdev_id %d enable %d dc %d dc_per_event %x levels %d\n",
2679 ar
->pdev
->pdev_id
, param
->enable
, param
->dc
,
2680 param
->dc_per_event
, THERMAL_LEVELS
);
2685 int ath11k_wmi_pdev_pktlog_enable(struct ath11k
*ar
, u32 pktlog_filter
)
2687 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2688 struct wmi_pktlog_enable_cmd
*cmd
;
2689 struct sk_buff
*skb
;
2692 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
2696 cmd
= (struct wmi_pktlog_enable_cmd
*)skb
->data
;
2698 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PDEV_PKTLOG_ENABLE_CMD
) |
2699 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2701 cmd
->pdev_id
= DP_HW2SW_MACID(ar
->pdev
->pdev_id
);
2702 cmd
->evlist
= pktlog_filter
;
2703 cmd
->enable
= ATH11K_WMI_PKTLOG_ENABLE_FORCE
;
2705 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2706 WMI_PDEV_PKTLOG_ENABLE_CMDID
);
2708 ath11k_warn(ar
->ab
, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
2715 int ath11k_wmi_pdev_pktlog_disable(struct ath11k
*ar
)
2717 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2718 struct wmi_pktlog_disable_cmd
*cmd
;
2719 struct sk_buff
*skb
;
2722 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
2726 cmd
= (struct wmi_pktlog_disable_cmd
*)skb
->data
;
2728 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PDEV_PKTLOG_DISABLE_CMD
) |
2729 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
2731 cmd
->pdev_id
= DP_HW2SW_MACID(ar
->pdev
->pdev_id
);
2733 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2734 WMI_PDEV_PKTLOG_DISABLE_CMDID
);
2736 ath11k_warn(ar
->ab
, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
2744 ath11k_wmi_send_twt_enable_cmd(struct ath11k
*ar
, u32 pdev_id
)
2746 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2747 struct ath11k_base
*ab
= wmi
->wmi_ab
->ab
;
2748 struct wmi_twt_enable_params_cmd
*cmd
;
2749 struct sk_buff
*skb
;
2754 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
2758 cmd
= (struct wmi_twt_enable_params_cmd
*)skb
->data
;
2759 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_TWT_ENABLE_CMD
) |
2760 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
2761 cmd
->pdev_id
= pdev_id
;
2762 cmd
->sta_cong_timer_ms
= ATH11K_TWT_DEF_STA_CONG_TIMER_MS
;
2763 cmd
->default_slot_size
= ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE
;
2764 cmd
->congestion_thresh_setup
= ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP
;
2765 cmd
->congestion_thresh_teardown
=
2766 ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN
;
2767 cmd
->congestion_thresh_critical
=
2768 ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL
;
2769 cmd
->interference_thresh_teardown
=
2770 ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN
;
2771 cmd
->interference_thresh_setup
=
2772 ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP
;
2773 cmd
->min_no_sta_setup
= ATH11K_TWT_DEF_MIN_NO_STA_SETUP
;
2774 cmd
->min_no_sta_teardown
= ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN
;
2775 cmd
->no_of_bcast_mcast_slots
= ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS
;
2776 cmd
->min_no_twt_slots
= ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS
;
2777 cmd
->max_no_sta_twt
= ATH11K_TWT_DEF_MAX_NO_STA_TWT
;
2778 cmd
->mode_check_interval
= ATH11K_TWT_DEF_MODE_CHECK_INTERVAL
;
2779 cmd
->add_sta_slot_interval
= ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL
;
2780 cmd
->remove_sta_slot_interval
=
2781 ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL
;
2782 /* TODO add MBSSID support */
2783 cmd
->mbss_support
= 0;
2785 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2786 WMI_TWT_ENABLE_CMDID
);
2788 ath11k_warn(ab
, "Failed to send WMI_TWT_ENABLE_CMDID");
2795 ath11k_wmi_send_twt_disable_cmd(struct ath11k
*ar
, u32 pdev_id
)
2797 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2798 struct ath11k_base
*ab
= wmi
->wmi_ab
->ab
;
2799 struct wmi_twt_disable_params_cmd
*cmd
;
2800 struct sk_buff
*skb
;
2805 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
2809 cmd
= (struct wmi_twt_disable_params_cmd
*)skb
->data
;
2810 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_TWT_DISABLE_CMD
) |
2811 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
2812 cmd
->pdev_id
= pdev_id
;
2814 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2815 WMI_TWT_DISABLE_CMDID
);
2817 ath11k_warn(ab
, "Failed to send WMI_TWT_DISABLE_CMDID");
2824 ath11k_wmi_send_obss_spr_cmd(struct ath11k
*ar
, u32 vdev_id
,
2825 struct ieee80211_he_obss_pd
*he_obss_pd
)
2827 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2828 struct ath11k_base
*ab
= wmi
->wmi_ab
->ab
;
2829 struct wmi_obss_spatial_reuse_params_cmd
*cmd
;
2830 struct sk_buff
*skb
;
2835 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
2839 cmd
= (struct wmi_obss_spatial_reuse_params_cmd
*)skb
->data
;
2840 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
2841 WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD
) |
2842 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
2843 cmd
->vdev_id
= vdev_id
;
2844 cmd
->enable
= he_obss_pd
->enable
;
2845 cmd
->obss_min
= he_obss_pd
->min_offset
;
2846 cmd
->obss_max
= he_obss_pd
->max_offset
;
2848 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2849 WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID
);
2852 "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID");
2859 ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k
*ar
, u32 vdev_id
,
2860 u8 bss_color
, u32 period
,
2863 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2864 struct ath11k_base
*ab
= wmi
->wmi_ab
->ab
;
2865 struct wmi_obss_color_collision_cfg_params_cmd
*cmd
;
2866 struct sk_buff
*skb
;
2871 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
2875 cmd
= (struct wmi_obss_color_collision_cfg_params_cmd
*)skb
->data
;
2876 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
2877 WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG
) |
2878 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
2879 cmd
->vdev_id
= vdev_id
;
2880 cmd
->evt_type
= enable
? ATH11K_OBSS_COLOR_COLLISION_DETECTION
:
2881 ATH11K_OBSS_COLOR_COLLISION_DETECTION_DISABLE
;
2882 cmd
->current_bss_color
= bss_color
;
2883 cmd
->detection_period_ms
= period
;
2884 cmd
->scan_period_ms
= ATH11K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS
;
2885 cmd
->free_slot_expiry_time_ms
= 0;
2888 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
2889 "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
2890 cmd
->vdev_id
, cmd
->evt_type
, cmd
->current_bss_color
,
2891 cmd
->detection_period_ms
, cmd
->scan_period_ms
);
2893 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2894 WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID
);
2896 ath11k_warn(ab
, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID");
2902 int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k
*ar
, u32 vdev_id
,
2905 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
2906 struct ath11k_base
*ab
= wmi
->wmi_ab
->ab
;
2907 struct wmi_bss_color_change_enable_params_cmd
*cmd
;
2908 struct sk_buff
*skb
;
2913 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
2917 cmd
= (struct wmi_bss_color_change_enable_params_cmd
*)skb
->data
;
2918 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_BSS_COLOR_CHANGE_ENABLE
) |
2919 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
2920 cmd
->vdev_id
= vdev_id
;
2921 cmd
->enable
= enable
? 1 : 0;
2923 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
2924 "wmi_send_bss_color_change_enable id %d enable %d\n",
2925 cmd
->vdev_id
, cmd
->enable
);
2927 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
2928 WMI_BSS_COLOR_CHANGE_ENABLE_CMDID
);
2930 ath11k_warn(ab
, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID");
2937 ath11k_fill_band_to_mac_param(struct ath11k_base
*soc
,
2938 struct wmi_host_pdev_band_to_mac
*band_to_mac
)
2941 struct ath11k_hal_reg_capabilities_ext
*hal_reg_cap
;
2942 struct ath11k_pdev
*pdev
;
2944 for (i
= 0; i
< soc
->num_radios
; i
++) {
2945 pdev
= &soc
->pdevs
[i
];
2946 hal_reg_cap
= &soc
->hal_reg_cap
[i
];
2947 band_to_mac
[i
].pdev_id
= pdev
->pdev_id
;
2949 switch (pdev
->cap
.supported_bands
) {
2950 case WMI_HOST_WLAN_2G_5G_CAP
:
2951 band_to_mac
[i
].start_freq
= hal_reg_cap
->low_2ghz_chan
;
2952 band_to_mac
[i
].end_freq
= hal_reg_cap
->high_5ghz_chan
;
2954 case WMI_HOST_WLAN_2G_CAP
:
2955 band_to_mac
[i
].start_freq
= hal_reg_cap
->low_2ghz_chan
;
2956 band_to_mac
[i
].end_freq
= hal_reg_cap
->high_2ghz_chan
;
2958 case WMI_HOST_WLAN_5G_CAP
:
2959 band_to_mac
[i
].start_freq
= hal_reg_cap
->low_5ghz_chan
;
2960 band_to_mac
[i
].end_freq
= hal_reg_cap
->high_5ghz_chan
;
2969 ath11k_wmi_copy_resource_config(struct wmi_resource_config
*wmi_cfg
,
2970 struct target_resource_config
*tg_cfg
)
2972 wmi_cfg
->num_vdevs
= tg_cfg
->num_vdevs
;
2973 wmi_cfg
->num_peers
= tg_cfg
->num_peers
;
2974 wmi_cfg
->num_offload_peers
= tg_cfg
->num_offload_peers
;
2975 wmi_cfg
->num_offload_reorder_buffs
= tg_cfg
->num_offload_reorder_buffs
;
2976 wmi_cfg
->num_peer_keys
= tg_cfg
->num_peer_keys
;
2977 wmi_cfg
->num_tids
= tg_cfg
->num_tids
;
2978 wmi_cfg
->ast_skid_limit
= tg_cfg
->ast_skid_limit
;
2979 wmi_cfg
->tx_chain_mask
= tg_cfg
->tx_chain_mask
;
2980 wmi_cfg
->rx_chain_mask
= tg_cfg
->rx_chain_mask
;
2981 wmi_cfg
->rx_timeout_pri
[0] = tg_cfg
->rx_timeout_pri
[0];
2982 wmi_cfg
->rx_timeout_pri
[1] = tg_cfg
->rx_timeout_pri
[1];
2983 wmi_cfg
->rx_timeout_pri
[2] = tg_cfg
->rx_timeout_pri
[2];
2984 wmi_cfg
->rx_timeout_pri
[3] = tg_cfg
->rx_timeout_pri
[3];
2985 wmi_cfg
->rx_decap_mode
= tg_cfg
->rx_decap_mode
;
2986 wmi_cfg
->scan_max_pending_req
= tg_cfg
->scan_max_pending_req
;
2987 wmi_cfg
->bmiss_offload_max_vdev
= tg_cfg
->bmiss_offload_max_vdev
;
2988 wmi_cfg
->roam_offload_max_vdev
= tg_cfg
->roam_offload_max_vdev
;
2989 wmi_cfg
->roam_offload_max_ap_profiles
=
2990 tg_cfg
->roam_offload_max_ap_profiles
;
2991 wmi_cfg
->num_mcast_groups
= tg_cfg
->num_mcast_groups
;
2992 wmi_cfg
->num_mcast_table_elems
= tg_cfg
->num_mcast_table_elems
;
2993 wmi_cfg
->mcast2ucast_mode
= tg_cfg
->mcast2ucast_mode
;
2994 wmi_cfg
->tx_dbg_log_size
= tg_cfg
->tx_dbg_log_size
;
2995 wmi_cfg
->num_wds_entries
= tg_cfg
->num_wds_entries
;
2996 wmi_cfg
->dma_burst_size
= tg_cfg
->dma_burst_size
;
2997 wmi_cfg
->mac_aggr_delim
= tg_cfg
->mac_aggr_delim
;
2998 wmi_cfg
->rx_skip_defrag_timeout_dup_detection_check
=
2999 tg_cfg
->rx_skip_defrag_timeout_dup_detection_check
;
3000 wmi_cfg
->vow_config
= tg_cfg
->vow_config
;
3001 wmi_cfg
->gtk_offload_max_vdev
= tg_cfg
->gtk_offload_max_vdev
;
3002 wmi_cfg
->num_msdu_desc
= tg_cfg
->num_msdu_desc
;
3003 wmi_cfg
->max_frag_entries
= tg_cfg
->max_frag_entries
;
3004 wmi_cfg
->num_tdls_vdevs
= tg_cfg
->num_tdls_vdevs
;
3005 wmi_cfg
->num_tdls_conn_table_entries
=
3006 tg_cfg
->num_tdls_conn_table_entries
;
3007 wmi_cfg
->beacon_tx_offload_max_vdev
=
3008 tg_cfg
->beacon_tx_offload_max_vdev
;
3009 wmi_cfg
->num_multicast_filter_entries
=
3010 tg_cfg
->num_multicast_filter_entries
;
3011 wmi_cfg
->num_wow_filters
= tg_cfg
->num_wow_filters
;
3012 wmi_cfg
->num_keep_alive_pattern
= tg_cfg
->num_keep_alive_pattern
;
3013 wmi_cfg
->keep_alive_pattern_size
= tg_cfg
->keep_alive_pattern_size
;
3014 wmi_cfg
->max_tdls_concurrent_sleep_sta
=
3015 tg_cfg
->max_tdls_concurrent_sleep_sta
;
3016 wmi_cfg
->max_tdls_concurrent_buffer_sta
=
3017 tg_cfg
->max_tdls_concurrent_buffer_sta
;
3018 wmi_cfg
->wmi_send_separate
= tg_cfg
->wmi_send_separate
;
3019 wmi_cfg
->num_ocb_vdevs
= tg_cfg
->num_ocb_vdevs
;
3020 wmi_cfg
->num_ocb_channels
= tg_cfg
->num_ocb_channels
;
3021 wmi_cfg
->num_ocb_schedules
= tg_cfg
->num_ocb_schedules
;
3022 wmi_cfg
->bpf_instruction_size
= tg_cfg
->bpf_instruction_size
;
3023 wmi_cfg
->max_bssid_rx_filters
= tg_cfg
->max_bssid_rx_filters
;
3024 wmi_cfg
->use_pdev_id
= tg_cfg
->use_pdev_id
;
3025 wmi_cfg
->flag1
= tg_cfg
->atf_config
;
3026 wmi_cfg
->peer_map_unmap_v2_support
= tg_cfg
->peer_map_unmap_v2_support
;
3027 wmi_cfg
->sched_params
= tg_cfg
->sched_params
;
3028 wmi_cfg
->twt_ap_pdev_count
= tg_cfg
->twt_ap_pdev_count
;
3029 wmi_cfg
->twt_ap_sta_count
= tg_cfg
->twt_ap_sta_count
;
3032 static int ath11k_init_cmd_send(struct ath11k_pdev_wmi
*wmi
,
3033 struct wmi_init_cmd_param
*param
)
3035 struct ath11k_base
*ab
= wmi
->wmi_ab
->ab
;
3036 struct sk_buff
*skb
;
3037 struct wmi_init_cmd
*cmd
;
3038 struct wmi_resource_config
*cfg
;
3039 struct wmi_pdev_set_hw_mode_cmd_param
*hw_mode
;
3040 struct wmi_pdev_band_to_mac
*band_to_mac
;
3041 struct wlan_host_mem_chunk
*host_mem_chunks
;
3042 struct wmi_tlv
*tlv
;
3045 u32 hw_mode_len
= 0;
3048 if (param
->hw_mode_id
!= WMI_HOST_HW_MODE_MAX
)
3049 hw_mode_len
= sizeof(*hw_mode
) + TLV_HDR_SIZE
+
3050 (param
->num_band_to_mac
* sizeof(*band_to_mac
));
3052 len
= sizeof(*cmd
) + TLV_HDR_SIZE
+ sizeof(*cfg
) + hw_mode_len
+
3053 (sizeof(*host_mem_chunks
) * WMI_MAX_MEM_REQS
);
3055 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
3059 cmd
= (struct wmi_init_cmd
*)skb
->data
;
3061 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_INIT_CMD
) |
3062 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
3064 ptr
= skb
->data
+ sizeof(*cmd
);
3067 ath11k_wmi_copy_resource_config(cfg
, param
->res_cfg
);
3069 cfg
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_RESOURCE_CONFIG
) |
3070 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cfg
) - TLV_HDR_SIZE
);
3072 ptr
+= sizeof(*cfg
);
3073 host_mem_chunks
= ptr
+ TLV_HDR_SIZE
;
3074 len
= sizeof(struct wlan_host_mem_chunk
);
3076 for (idx
= 0; idx
< param
->num_mem_chunks
; ++idx
) {
3077 host_mem_chunks
[idx
].tlv_header
=
3078 FIELD_PREP(WMI_TLV_TAG
,
3079 WMI_TAG_WLAN_HOST_MEMORY_CHUNK
) |
3080 FIELD_PREP(WMI_TLV_LEN
, len
);
3082 host_mem_chunks
[idx
].ptr
= param
->mem_chunks
[idx
].paddr
;
3083 host_mem_chunks
[idx
].size
= param
->mem_chunks
[idx
].len
;
3084 host_mem_chunks
[idx
].req_id
= param
->mem_chunks
[idx
].req_id
;
3086 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
3087 "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
3088 param
->mem_chunks
[idx
].req_id
,
3089 (u64
)param
->mem_chunks
[idx
].paddr
,
3090 param
->mem_chunks
[idx
].len
);
3092 cmd
->num_host_mem_chunks
= param
->num_mem_chunks
;
3093 len
= sizeof(struct wlan_host_mem_chunk
) * param
->num_mem_chunks
;
3095 /* num_mem_chunks is zero */
3097 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_STRUCT
) |
3098 FIELD_PREP(WMI_TLV_LEN
, len
);
3099 ptr
+= TLV_HDR_SIZE
+ len
;
3101 if (param
->hw_mode_id
!= WMI_HOST_HW_MODE_MAX
) {
3102 hw_mode
= (struct wmi_pdev_set_hw_mode_cmd_param
*)ptr
;
3103 hw_mode
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
3104 WMI_TAG_PDEV_SET_HW_MODE_CMD
) |
3105 FIELD_PREP(WMI_TLV_LEN
,
3106 sizeof(*hw_mode
) - TLV_HDR_SIZE
);
3108 hw_mode
->hw_mode_index
= param
->hw_mode_id
;
3109 hw_mode
->num_band_to_mac
= param
->num_band_to_mac
;
3111 ptr
+= sizeof(*hw_mode
);
3113 len
= param
->num_band_to_mac
* sizeof(*band_to_mac
);
3115 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_STRUCT
) |
3116 FIELD_PREP(WMI_TLV_LEN
, len
);
3118 ptr
+= TLV_HDR_SIZE
;
3119 len
= sizeof(*band_to_mac
);
3121 for (idx
= 0; idx
< param
->num_band_to_mac
; idx
++) {
3122 band_to_mac
= (void *)ptr
;
3124 band_to_mac
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
3125 WMI_TAG_PDEV_BAND_TO_MAC
) |
3126 FIELD_PREP(WMI_TLV_LEN
,
3127 len
- TLV_HDR_SIZE
);
3128 band_to_mac
->pdev_id
= param
->band_to_mac
[idx
].pdev_id
;
3129 band_to_mac
->start_freq
=
3130 param
->band_to_mac
[idx
].start_freq
;
3131 band_to_mac
->end_freq
=
3132 param
->band_to_mac
[idx
].end_freq
;
3133 ptr
+= sizeof(*band_to_mac
);
3137 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_INIT_CMDID
);
3139 ath11k_warn(ab
, "failed to send WMI_INIT_CMDID\n");
3146 int ath11k_wmi_pdev_lro_cfg(struct ath11k
*ar
,
3149 struct ath11k_wmi_pdev_lro_config_cmd
*cmd
;
3150 struct sk_buff
*skb
;
3153 skb
= ath11k_wmi_alloc_skb(ar
->wmi
->wmi_ab
, sizeof(*cmd
));
3157 cmd
= (struct ath11k_wmi_pdev_lro_config_cmd
*)skb
->data
;
3158 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_LRO_INFO_CMD
) |
3159 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
3161 get_random_bytes(cmd
->th_4
, sizeof(uint32_t) * ATH11K_IPV4_TH_SEED_SIZE
);
3162 get_random_bytes(cmd
->th_6
, sizeof(uint32_t) * ATH11K_IPV6_TH_SEED_SIZE
);
3164 cmd
->pdev_id
= pdev_id
;
3166 ret
= ath11k_wmi_cmd_send(ar
->wmi
, skb
, WMI_LRO_CONFIG_CMDID
);
3169 "failed to send lro cfg req wmi cmd\n");
3173 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
3174 "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id
);
3181 int ath11k_wmi_wait_for_service_ready(struct ath11k_base
*ab
)
3183 unsigned long time_left
;
3185 time_left
= wait_for_completion_timeout(&ab
->wmi_ab
.service_ready
,
3186 WMI_SERVICE_READY_TIMEOUT_HZ
);
3193 int ath11k_wmi_wait_for_unified_ready(struct ath11k_base
*ab
)
3195 unsigned long time_left
;
3197 time_left
= wait_for_completion_timeout(&ab
->wmi_ab
.unified_ready
,
3198 WMI_SERVICE_READY_TIMEOUT_HZ
);
3205 int ath11k_wmi_cmd_init(struct ath11k_base
*ab
)
3207 struct ath11k_wmi_base
*wmi_sc
= &ab
->wmi_ab
;
3208 struct wmi_init_cmd_param init_param
;
3209 struct target_resource_config config
;
3211 memset(&init_param
, 0, sizeof(init_param
));
3212 memset(&config
, 0, sizeof(config
));
3214 config
.num_vdevs
= ab
->num_radios
* TARGET_NUM_VDEVS
;
3216 if (ab
->num_radios
== 2) {
3217 config
.num_peers
= TARGET_NUM_PEERS(DBS
);
3218 config
.num_tids
= TARGET_NUM_TIDS(DBS
);
3219 } else if (ab
->num_radios
== 3) {
3220 config
.num_peers
= TARGET_NUM_PEERS(DBS_SBS
);
3221 config
.num_tids
= TARGET_NUM_TIDS(DBS_SBS
);
3223 /* Control should not reach here */
3224 config
.num_peers
= TARGET_NUM_PEERS(SINGLE
);
3225 config
.num_tids
= TARGET_NUM_TIDS(SINGLE
);
3227 config
.num_offload_peers
= TARGET_NUM_OFFLD_PEERS
;
3228 config
.num_offload_reorder_buffs
= TARGET_NUM_OFFLD_REORDER_BUFFS
;
3229 config
.num_peer_keys
= TARGET_NUM_PEER_KEYS
;
3230 config
.ast_skid_limit
= TARGET_AST_SKID_LIMIT
;
3231 config
.tx_chain_mask
= (1 << ab
->target_caps
.num_rf_chains
) - 1;
3232 config
.rx_chain_mask
= (1 << ab
->target_caps
.num_rf_chains
) - 1;
3233 config
.rx_timeout_pri
[0] = TARGET_RX_TIMEOUT_LO_PRI
;
3234 config
.rx_timeout_pri
[1] = TARGET_RX_TIMEOUT_LO_PRI
;
3235 config
.rx_timeout_pri
[2] = TARGET_RX_TIMEOUT_LO_PRI
;
3236 config
.rx_timeout_pri
[3] = TARGET_RX_TIMEOUT_HI_PRI
;
3237 config
.rx_decap_mode
= TARGET_DECAP_MODE_NATIVE_WIFI
;
3238 config
.scan_max_pending_req
= TARGET_SCAN_MAX_PENDING_REQS
;
3239 config
.bmiss_offload_max_vdev
= TARGET_BMISS_OFFLOAD_MAX_VDEV
;
3240 config
.roam_offload_max_vdev
= TARGET_ROAM_OFFLOAD_MAX_VDEV
;
3241 config
.roam_offload_max_ap_profiles
= TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES
;
3242 config
.num_mcast_groups
= TARGET_NUM_MCAST_GROUPS
;
3243 config
.num_mcast_table_elems
= TARGET_NUM_MCAST_TABLE_ELEMS
;
3244 config
.mcast2ucast_mode
= TARGET_MCAST2UCAST_MODE
;
3245 config
.tx_dbg_log_size
= TARGET_TX_DBG_LOG_SIZE
;
3246 config
.num_wds_entries
= TARGET_NUM_WDS_ENTRIES
;
3247 config
.dma_burst_size
= TARGET_DMA_BURST_SIZE
;
3248 config
.rx_skip_defrag_timeout_dup_detection_check
=
3249 TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK
;
3250 config
.vow_config
= TARGET_VOW_CONFIG
;
3251 config
.gtk_offload_max_vdev
= TARGET_GTK_OFFLOAD_MAX_VDEV
;
3252 config
.num_msdu_desc
= TARGET_NUM_MSDU_DESC
;
3253 config
.beacon_tx_offload_max_vdev
= ab
->num_radios
* TARGET_MAX_BCN_OFFLD
;
3254 config
.rx_batchmode
= TARGET_RX_BATCHMODE
;
3255 config
.peer_map_unmap_v2_support
= 1;
3256 config
.twt_ap_pdev_count
= ab
->num_radios
;
3257 config
.twt_ap_sta_count
= 1000;
3259 memcpy(&wmi_sc
->wlan_resource_config
, &config
, sizeof(config
));
3261 init_param
.res_cfg
= &wmi_sc
->wlan_resource_config
;
3262 init_param
.num_mem_chunks
= wmi_sc
->num_mem_chunks
;
3263 init_param
.hw_mode_id
= wmi_sc
->preferred_hw_mode
;
3264 init_param
.mem_chunks
= wmi_sc
->mem_chunks
;
3266 if (wmi_sc
->preferred_hw_mode
== WMI_HOST_HW_MODE_SINGLE
)
3267 init_param
.hw_mode_id
= WMI_HOST_HW_MODE_MAX
;
3269 init_param
.num_band_to_mac
= ab
->num_radios
;
3271 ath11k_fill_band_to_mac_param(ab
, init_param
.band_to_mac
);
3273 return ath11k_init_cmd_send(&wmi_sc
->wmi
[0], &init_param
);
3276 static int ath11k_wmi_tlv_hw_mode_caps_parse(struct ath11k_base
*soc
,
3278 const void *ptr
, void *data
)
3280 struct wmi_tlv_svc_rdy_ext_parse
*svc_rdy_ext
= data
;
3281 struct wmi_hw_mode_capabilities
*hw_mode_cap
;
3284 if (tag
!= WMI_TAG_HW_MODE_CAPABILITIES
)
3287 if (svc_rdy_ext
->n_hw_mode_caps
>= svc_rdy_ext
->param
.num_hw_modes
)
3290 hw_mode_cap
= container_of(ptr
, struct wmi_hw_mode_capabilities
,
3292 svc_rdy_ext
->n_hw_mode_caps
++;
3294 phy_map
= hw_mode_cap
->phy_id_map
;
3296 svc_rdy_ext
->tot_phy_id
++;
3297 phy_map
= phy_map
>> 1;
3303 static int ath11k_wmi_tlv_hw_mode_caps(struct ath11k_base
*soc
,
3304 u16 len
, const void *ptr
, void *data
)
3306 struct wmi_tlv_svc_rdy_ext_parse
*svc_rdy_ext
= data
;
3307 struct wmi_hw_mode_capabilities
*hw_mode_caps
;
3308 enum wmi_host_hw_mode_config_type mode
, pref
;
3312 svc_rdy_ext
->n_hw_mode_caps
= 0;
3313 svc_rdy_ext
->hw_mode_caps
= (struct wmi_hw_mode_capabilities
*)ptr
;
3315 ret
= ath11k_wmi_tlv_iter(soc
, ptr
, len
,
3316 ath11k_wmi_tlv_hw_mode_caps_parse
,
3319 ath11k_warn(soc
, "failed to parse tlv %d\n", ret
);
3324 while (i
< svc_rdy_ext
->n_hw_mode_caps
) {
3325 hw_mode_caps
= &svc_rdy_ext
->hw_mode_caps
[i
];
3326 mode
= hw_mode_caps
->hw_mode_id
;
3327 pref
= soc
->wmi_ab
.preferred_hw_mode
;
3329 if (ath11k_hw_mode_pri_map
[mode
] < ath11k_hw_mode_pri_map
[pref
]) {
3330 svc_rdy_ext
->pref_hw_mode_caps
= *hw_mode_caps
;
3331 soc
->wmi_ab
.preferred_hw_mode
= mode
;
3336 if (soc
->wmi_ab
.preferred_hw_mode
== WMI_HOST_HW_MODE_MAX
)
3342 static int ath11k_wmi_tlv_mac_phy_caps_parse(struct ath11k_base
*soc
,
3344 const void *ptr
, void *data
)
3346 struct wmi_tlv_svc_rdy_ext_parse
*svc_rdy_ext
= data
;
3348 if (tag
!= WMI_TAG_MAC_PHY_CAPABILITIES
)
3351 if (svc_rdy_ext
->n_mac_phy_caps
>= svc_rdy_ext
->tot_phy_id
)
3354 len
= min_t(u16
, len
, sizeof(struct wmi_mac_phy_capabilities
));
3355 if (!svc_rdy_ext
->n_mac_phy_caps
) {
3356 svc_rdy_ext
->mac_phy_caps
= kzalloc((svc_rdy_ext
->tot_phy_id
) * len
,
3358 if (!svc_rdy_ext
->mac_phy_caps
)
3362 memcpy(svc_rdy_ext
->mac_phy_caps
+ svc_rdy_ext
->n_mac_phy_caps
, ptr
, len
);
3363 svc_rdy_ext
->n_mac_phy_caps
++;
3367 static int ath11k_wmi_tlv_ext_hal_reg_caps_parse(struct ath11k_base
*soc
,
3369 const void *ptr
, void *data
)
3371 struct wmi_tlv_svc_rdy_ext_parse
*svc_rdy_ext
= data
;
3373 if (tag
!= WMI_TAG_HAL_REG_CAPABILITIES_EXT
)
3376 if (svc_rdy_ext
->n_ext_hal_reg_caps
>= svc_rdy_ext
->param
.num_phy
)
3379 svc_rdy_ext
->n_ext_hal_reg_caps
++;
3383 static int ath11k_wmi_tlv_ext_hal_reg_caps(struct ath11k_base
*soc
,
3384 u16 len
, const void *ptr
, void *data
)
3386 struct ath11k_pdev_wmi
*wmi_handle
= &soc
->wmi_ab
.wmi
[0];
3387 struct wmi_tlv_svc_rdy_ext_parse
*svc_rdy_ext
= data
;
3388 struct ath11k_hal_reg_capabilities_ext reg_cap
;
3392 svc_rdy_ext
->n_ext_hal_reg_caps
= 0;
3393 svc_rdy_ext
->ext_hal_reg_caps
= (struct wmi_hal_reg_capabilities_ext
*)ptr
;
3394 ret
= ath11k_wmi_tlv_iter(soc
, ptr
, len
,
3395 ath11k_wmi_tlv_ext_hal_reg_caps_parse
,
3398 ath11k_warn(soc
, "failed to parse tlv %d\n", ret
);
3402 for (i
= 0; i
< svc_rdy_ext
->param
.num_phy
; i
++) {
3403 ret
= ath11k_pull_reg_cap_svc_rdy_ext(wmi_handle
,
3404 svc_rdy_ext
->soc_hal_reg_caps
,
3405 svc_rdy_ext
->ext_hal_reg_caps
, i
,
3408 ath11k_warn(soc
, "failed to extract reg cap %d\n", i
);
3412 memcpy(&soc
->hal_reg_cap
[reg_cap
.phy_id
],
3413 ®_cap
, sizeof(reg_cap
));
3418 static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base
*soc
,
3419 u16 len
, const void *ptr
,
3422 struct ath11k_pdev_wmi
*wmi_handle
= &soc
->wmi_ab
.wmi
[0];
3423 struct wmi_tlv_svc_rdy_ext_parse
*svc_rdy_ext
= data
;
3424 u8 hw_mode_id
= svc_rdy_ext
->pref_hw_mode_caps
.hw_mode_id
;
3428 svc_rdy_ext
->soc_hal_reg_caps
= (struct wmi_soc_hal_reg_capabilities
*)ptr
;
3429 svc_rdy_ext
->param
.num_phy
= svc_rdy_ext
->soc_hal_reg_caps
->num_phy
;
3431 soc
->num_radios
= 0;
3432 phy_id_map
= svc_rdy_ext
->pref_hw_mode_caps
.phy_id_map
;
3434 while (phy_id_map
&& soc
->num_radios
< MAX_RADIOS
) {
3435 ret
= ath11k_pull_mac_phy_cap_svc_ready_ext(wmi_handle
,
3436 svc_rdy_ext
->hw_caps
,
3437 svc_rdy_ext
->hw_mode_caps
,
3438 svc_rdy_ext
->soc_hal_reg_caps
,
3439 svc_rdy_ext
->mac_phy_caps
,
3440 hw_mode_id
, soc
->num_radios
,
3441 &soc
->pdevs
[soc
->num_radios
]);
3443 ath11k_warn(soc
, "failed to extract mac caps, idx :%d\n",
3450 /* TODO: mac_phy_cap prints */
3456 static int ath11k_wmi_tlv_svc_rdy_ext_parse(struct ath11k_base
*ab
,
3458 const void *ptr
, void *data
)
3460 struct ath11k_pdev_wmi
*wmi_handle
= &ab
->wmi_ab
.wmi
[0];
3461 struct wmi_tlv_svc_rdy_ext_parse
*svc_rdy_ext
= data
;
3465 case WMI_TAG_SERVICE_READY_EXT_EVENT
:
3466 ret
= ath11k_pull_svc_ready_ext(wmi_handle
, ptr
,
3467 &svc_rdy_ext
->param
);
3469 ath11k_warn(ab
, "unable to extract ext params\n");
3474 case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS
:
3475 svc_rdy_ext
->hw_caps
= (struct wmi_soc_mac_phy_hw_mode_caps
*)ptr
;
3476 svc_rdy_ext
->param
.num_hw_modes
= svc_rdy_ext
->hw_caps
->num_hw_modes
;
3479 case WMI_TAG_SOC_HAL_REG_CAPABILITIES
:
3480 ret
= ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(ab
, len
, ptr
,
3486 case WMI_TAG_ARRAY_STRUCT
:
3487 if (!svc_rdy_ext
->hw_mode_done
) {
3488 ret
= ath11k_wmi_tlv_hw_mode_caps(ab
, len
, ptr
,
3493 svc_rdy_ext
->hw_mode_done
= true;
3494 } else if (!svc_rdy_ext
->mac_phy_done
) {
3495 svc_rdy_ext
->n_mac_phy_caps
= 0;
3496 ret
= ath11k_wmi_tlv_iter(ab
, ptr
, len
,
3497 ath11k_wmi_tlv_mac_phy_caps_parse
,
3500 ath11k_warn(ab
, "failed to parse tlv %d\n", ret
);
3504 svc_rdy_ext
->mac_phy_done
= true;
3505 } else if (!svc_rdy_ext
->ext_hal_reg_done
) {
3506 ret
= ath11k_wmi_tlv_ext_hal_reg_caps(ab
, len
, ptr
,
3511 svc_rdy_ext
->ext_hal_reg_done
= true;
3512 complete(&ab
->wmi_ab
.service_ready
);
3522 static int ath11k_service_ready_ext_event(struct ath11k_base
*ab
,
3523 struct sk_buff
*skb
)
3525 struct wmi_tlv_svc_rdy_ext_parse svc_rdy_ext
= { };
3528 ret
= ath11k_wmi_tlv_iter(ab
, skb
->data
, skb
->len
,
3529 ath11k_wmi_tlv_svc_rdy_ext_parse
,
3532 ath11k_warn(ab
, "failed to parse tlv %d\n", ret
);
3536 kfree(svc_rdy_ext
.mac_phy_caps
);
3540 static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base
*ab
, struct sk_buff
*skb
,
3541 struct wmi_vdev_start_resp_event
*vdev_rsp
)
3544 const struct wmi_vdev_start_resp_event
*ev
;
3547 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
3550 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
3554 ev
= tb
[WMI_TAG_VDEV_START_RESPONSE_EVENT
];
3556 ath11k_warn(ab
, "failed to fetch vdev start resp ev");
3561 memset(vdev_rsp
, 0, sizeof(*vdev_rsp
));
3563 vdev_rsp
->vdev_id
= ev
->vdev_id
;
3564 vdev_rsp
->requestor_id
= ev
->requestor_id
;
3565 vdev_rsp
->resp_type
= ev
->resp_type
;
3566 vdev_rsp
->status
= ev
->status
;
3567 vdev_rsp
->chain_mask
= ev
->chain_mask
;
3568 vdev_rsp
->smps_mode
= ev
->smps_mode
;
3569 vdev_rsp
->mac_id
= ev
->mac_id
;
3570 vdev_rsp
->cfgd_tx_streams
= ev
->cfgd_tx_streams
;
3571 vdev_rsp
->cfgd_rx_streams
= ev
->cfgd_rx_streams
;
3577 static struct cur_reg_rule
3578 *create_reg_rules_from_wmi(u32 num_reg_rules
,
3579 struct wmi_regulatory_rule_struct
*wmi_reg_rule
)
3581 struct cur_reg_rule
*reg_rule_ptr
;
3584 reg_rule_ptr
= kzalloc((num_reg_rules
* sizeof(*reg_rule_ptr
)),
3590 for (count
= 0; count
< num_reg_rules
; count
++) {
3591 reg_rule_ptr
[count
].start_freq
=
3592 FIELD_GET(REG_RULE_START_FREQ
,
3593 wmi_reg_rule
[count
].freq_info
);
3594 reg_rule_ptr
[count
].end_freq
=
3595 FIELD_GET(REG_RULE_END_FREQ
,
3596 wmi_reg_rule
[count
].freq_info
);
3597 reg_rule_ptr
[count
].max_bw
=
3598 FIELD_GET(REG_RULE_MAX_BW
,
3599 wmi_reg_rule
[count
].bw_pwr_info
);
3600 reg_rule_ptr
[count
].reg_power
=
3601 FIELD_GET(REG_RULE_REG_PWR
,
3602 wmi_reg_rule
[count
].bw_pwr_info
);
3603 reg_rule_ptr
[count
].ant_gain
=
3604 FIELD_GET(REG_RULE_ANT_GAIN
,
3605 wmi_reg_rule
[count
].bw_pwr_info
);
3606 reg_rule_ptr
[count
].flags
=
3607 FIELD_GET(REG_RULE_FLAGS
,
3608 wmi_reg_rule
[count
].flag_info
);
3611 return reg_rule_ptr
;
3614 static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base
*ab
,
3615 struct sk_buff
*skb
,
3616 struct cur_regulatory_info
*reg_info
)
3619 const struct wmi_reg_chan_list_cc_event
*chan_list_event_hdr
;
3620 struct wmi_regulatory_rule_struct
*wmi_reg_rule
;
3621 u32 num_2g_reg_rules
, num_5g_reg_rules
;
3624 ath11k_dbg(ab
, ATH11K_DBG_WMI
, "processing regulatory channel list\n");
3626 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
3629 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
3633 chan_list_event_hdr
= tb
[WMI_TAG_REG_CHAN_LIST_CC_EVENT
];
3634 if (!chan_list_event_hdr
) {
3635 ath11k_warn(ab
, "failed to fetch reg chan list update ev\n");
3640 reg_info
->num_2g_reg_rules
= chan_list_event_hdr
->num_2g_reg_rules
;
3641 reg_info
->num_5g_reg_rules
= chan_list_event_hdr
->num_5g_reg_rules
;
3643 if (!(reg_info
->num_2g_reg_rules
+ reg_info
->num_5g_reg_rules
)) {
3644 ath11k_warn(ab
, "No regulatory rules available in the event info\n");
3649 memcpy(reg_info
->alpha2
, &chan_list_event_hdr
->alpha2
,
3651 reg_info
->dfs_region
= chan_list_event_hdr
->dfs_region
;
3652 reg_info
->phybitmap
= chan_list_event_hdr
->phybitmap
;
3653 reg_info
->num_phy
= chan_list_event_hdr
->num_phy
;
3654 reg_info
->phy_id
= chan_list_event_hdr
->phy_id
;
3655 reg_info
->ctry_code
= chan_list_event_hdr
->country_id
;
3656 reg_info
->reg_dmn_pair
= chan_list_event_hdr
->domain_code
;
3657 if (chan_list_event_hdr
->status_code
== WMI_REG_SET_CC_STATUS_PASS
)
3658 reg_info
->status_code
= REG_SET_CC_STATUS_PASS
;
3659 else if (chan_list_event_hdr
->status_code
== WMI_REG_CURRENT_ALPHA2_NOT_FOUND
)
3660 reg_info
->status_code
= REG_CURRENT_ALPHA2_NOT_FOUND
;
3661 else if (chan_list_event_hdr
->status_code
== WMI_REG_INIT_ALPHA2_NOT_FOUND
)
3662 reg_info
->status_code
= REG_INIT_ALPHA2_NOT_FOUND
;
3663 else if (chan_list_event_hdr
->status_code
== WMI_REG_SET_CC_CHANGE_NOT_ALLOWED
)
3664 reg_info
->status_code
= REG_SET_CC_CHANGE_NOT_ALLOWED
;
3665 else if (chan_list_event_hdr
->status_code
== WMI_REG_SET_CC_STATUS_NO_MEMORY
)
3666 reg_info
->status_code
= REG_SET_CC_STATUS_NO_MEMORY
;
3667 else if (chan_list_event_hdr
->status_code
== WMI_REG_SET_CC_STATUS_FAIL
)
3668 reg_info
->status_code
= REG_SET_CC_STATUS_FAIL
;
3670 reg_info
->min_bw_2g
= chan_list_event_hdr
->min_bw_2g
;
3671 reg_info
->max_bw_2g
= chan_list_event_hdr
->max_bw_2g
;
3672 reg_info
->min_bw_5g
= chan_list_event_hdr
->min_bw_5g
;
3673 reg_info
->max_bw_5g
= chan_list_event_hdr
->max_bw_5g
;
3675 num_2g_reg_rules
= reg_info
->num_2g_reg_rules
;
3676 num_5g_reg_rules
= reg_info
->num_5g_reg_rules
;
3678 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
3679 "%s:cc %s dsf %d BW: min_2g %d max_2g %d min_5g %d max_5g %d",
3680 __func__
, reg_info
->alpha2
, reg_info
->dfs_region
,
3681 reg_info
->min_bw_2g
, reg_info
->max_bw_2g
,
3682 reg_info
->min_bw_5g
, reg_info
->max_bw_5g
);
3684 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
3685 "%s: num_2g_reg_rules %d num_5g_reg_rules %d", __func__
,
3686 num_2g_reg_rules
, num_5g_reg_rules
);
3689 (struct wmi_regulatory_rule_struct
*)((u8
*)chan_list_event_hdr
3690 + sizeof(*chan_list_event_hdr
)
3691 + sizeof(struct wmi_tlv
));
3693 if (num_2g_reg_rules
) {
3694 reg_info
->reg_rules_2g_ptr
= create_reg_rules_from_wmi(num_2g_reg_rules
,
3696 if (!reg_info
->reg_rules_2g_ptr
) {
3698 ath11k_warn(ab
, "Unable to Allocate memory for 2g rules\n");
3703 if (num_5g_reg_rules
) {
3704 wmi_reg_rule
+= num_2g_reg_rules
;
3705 reg_info
->reg_rules_5g_ptr
= create_reg_rules_from_wmi(num_5g_reg_rules
,
3707 if (!reg_info
->reg_rules_5g_ptr
) {
3709 ath11k_warn(ab
, "Unable to Allocate memory for 5g rules\n");
3714 ath11k_dbg(ab
, ATH11K_DBG_WMI
, "processed regulatory channel list\n");
3720 static int ath11k_pull_peer_del_resp_ev(struct ath11k_base
*ab
, struct sk_buff
*skb
,
3721 struct wmi_peer_delete_resp_event
*peer_del_resp
)
3724 const struct wmi_peer_delete_resp_event
*ev
;
3727 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
3730 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
3734 ev
= tb
[WMI_TAG_PEER_DELETE_RESP_EVENT
];
3736 ath11k_warn(ab
, "failed to fetch peer delete resp ev");
3741 memset(peer_del_resp
, 0, sizeof(*peer_del_resp
));
3743 peer_del_resp
->vdev_id
= ev
->vdev_id
;
3744 ether_addr_copy(peer_del_resp
->peer_macaddr
.addr
,
3745 ev
->peer_macaddr
.addr
);
3751 static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base
*ab
, void *evt_buf
,
3752 u32 len
, u32
*vdev_id
,
3756 const struct wmi_bcn_tx_status_event
*ev
;
3759 tb
= ath11k_wmi_tlv_parse_alloc(ab
, evt_buf
, len
, GFP_ATOMIC
);
3762 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
3766 ev
= tb
[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT
];
3768 ath11k_warn(ab
, "failed to fetch bcn tx status ev");
3773 *vdev_id
= ev
->vdev_id
;
3774 *tx_status
= ev
->tx_status
;
3780 static int ath11k_pull_vdev_stopped_param_tlv(struct ath11k_base
*ab
, struct sk_buff
*skb
,
3784 const struct wmi_vdev_stopped_event
*ev
;
3787 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
3790 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
3794 ev
= tb
[WMI_TAG_VDEV_STOPPED_EVENT
];
3796 ath11k_warn(ab
, "failed to fetch vdev stop ev");
3801 *vdev_id
= ev
->vdev_id
;
3807 static int ath11k_pull_mgmt_rx_params_tlv(struct ath11k_base
*ab
,
3808 struct sk_buff
*skb
,
3809 struct mgmt_rx_event_params
*hdr
)
3812 const struct wmi_mgmt_rx_hdr
*ev
;
3816 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
3819 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
3823 ev
= tb
[WMI_TAG_MGMT_RX_HDR
];
3824 frame
= tb
[WMI_TAG_ARRAY_BYTE
];
3826 if (!ev
|| !frame
) {
3827 ath11k_warn(ab
, "failed to fetch mgmt rx hdr");
3832 hdr
->pdev_id
= ev
->pdev_id
;
3833 hdr
->channel
= ev
->channel
;
3835 hdr
->rate
= ev
->rate
;
3836 hdr
->phy_mode
= ev
->phy_mode
;
3837 hdr
->buf_len
= ev
->buf_len
;
3838 hdr
->status
= ev
->status
;
3839 hdr
->flags
= ev
->flags
;
3840 hdr
->rssi
= ev
->rssi
;
3841 hdr
->tsf_delta
= ev
->tsf_delta
;
3842 memcpy(hdr
->rssi_ctl
, ev
->rssi_ctl
, sizeof(hdr
->rssi_ctl
));
3844 if (skb
->len
< (frame
- skb
->data
) + hdr
->buf_len
) {
3845 ath11k_warn(ab
, "invalid length in mgmt rx hdr ev");
3850 /* shift the sk_buff to point to `frame` */
3852 skb_put(skb
, frame
- skb
->data
);
3853 skb_pull(skb
, frame
- skb
->data
);
3854 skb_put(skb
, hdr
->buf_len
);
3856 ath11k_ce_byte_swap(skb
->data
, hdr
->buf_len
);
3862 static int wmi_process_mgmt_tx_comp(struct ath11k
*ar
, u32 desc_id
,
3865 struct sk_buff
*msdu
;
3866 struct ieee80211_tx_info
*info
;
3867 struct ath11k_skb_cb
*skb_cb
;
3869 spin_lock_bh(&ar
->txmgmt_idr_lock
);
3870 msdu
= idr_find(&ar
->txmgmt_idr
, desc_id
);
3873 ath11k_warn(ar
->ab
, "received mgmt tx compl for invalid msdu_id: %d\n",
3875 spin_unlock_bh(&ar
->txmgmt_idr_lock
);
3879 idr_remove(&ar
->txmgmt_idr
, desc_id
);
3880 spin_unlock_bh(&ar
->txmgmt_idr_lock
);
3882 skb_cb
= ATH11K_SKB_CB(msdu
);
3883 dma_unmap_single(ar
->ab
->dev
, skb_cb
->paddr
, msdu
->len
, DMA_TO_DEVICE
);
3885 info
= IEEE80211_SKB_CB(msdu
);
3886 if ((!(info
->flags
& IEEE80211_TX_CTL_NO_ACK
)) && !status
)
3887 info
->flags
|= IEEE80211_TX_STAT_ACK
;
3889 ieee80211_tx_status_irqsafe(ar
->hw
, msdu
);
3891 /* WARN when we received this event without doing any mgmt tx */
3892 if (atomic_dec_if_positive(&ar
->num_pending_mgmt_tx
) < 0)
3898 static int ath11k_pull_mgmt_tx_compl_param_tlv(struct ath11k_base
*ab
,
3899 struct sk_buff
*skb
,
3900 struct wmi_mgmt_tx_compl_event
*param
)
3903 const struct wmi_mgmt_tx_compl_event
*ev
;
3906 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
3909 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
3913 ev
= tb
[WMI_TAG_MGMT_TX_COMPL_EVENT
];
3915 ath11k_warn(ab
, "failed to fetch mgmt tx compl ev");
3920 param
->pdev_id
= ev
->pdev_id
;
3921 param
->desc_id
= ev
->desc_id
;
3922 param
->status
= ev
->status
;
3928 static void ath11k_wmi_event_scan_started(struct ath11k
*ar
)
3930 lockdep_assert_held(&ar
->data_lock
);
3932 switch (ar
->scan
.state
) {
3933 case ATH11K_SCAN_IDLE
:
3934 case ATH11K_SCAN_RUNNING
:
3935 case ATH11K_SCAN_ABORTING
:
3936 ath11k_warn(ar
->ab
, "received scan started event in an invalid scan state: %s (%d)\n",
3937 ath11k_scan_state_str(ar
->scan
.state
),
3940 case ATH11K_SCAN_STARTING
:
3941 ar
->scan
.state
= ATH11K_SCAN_RUNNING
;
3942 complete(&ar
->scan
.started
);
3947 static void ath11k_wmi_event_scan_start_failed(struct ath11k
*ar
)
3949 lockdep_assert_held(&ar
->data_lock
);
3951 switch (ar
->scan
.state
) {
3952 case ATH11K_SCAN_IDLE
:
3953 case ATH11K_SCAN_RUNNING
:
3954 case ATH11K_SCAN_ABORTING
:
3955 ath11k_warn(ar
->ab
, "received scan start failed event in an invalid scan state: %s (%d)\n",
3956 ath11k_scan_state_str(ar
->scan
.state
),
3959 case ATH11K_SCAN_STARTING
:
3960 complete(&ar
->scan
.started
);
3961 __ath11k_mac_scan_finish(ar
);
3966 static void ath11k_wmi_event_scan_completed(struct ath11k
*ar
)
3968 lockdep_assert_held(&ar
->data_lock
);
3970 switch (ar
->scan
.state
) {
3971 case ATH11K_SCAN_IDLE
:
3972 case ATH11K_SCAN_STARTING
:
3973 /* One suspected reason scan can be completed while starting is
3974 * if firmware fails to deliver all scan events to the host,
3975 * e.g. when transport pipe is full. This has been observed
3976 * with spectral scan phyerr events starving wmi transport
3977 * pipe. In such case the "scan completed" event should be (and
3978 * is) ignored by the host as it may be just firmware's scan
3979 * state machine recovering.
3981 ath11k_warn(ar
->ab
, "received scan completed event in an invalid scan state: %s (%d)\n",
3982 ath11k_scan_state_str(ar
->scan
.state
),
3985 case ATH11K_SCAN_RUNNING
:
3986 case ATH11K_SCAN_ABORTING
:
3987 __ath11k_mac_scan_finish(ar
);
3992 static void ath11k_wmi_event_scan_bss_chan(struct ath11k
*ar
)
3994 lockdep_assert_held(&ar
->data_lock
);
3996 switch (ar
->scan
.state
) {
3997 case ATH11K_SCAN_IDLE
:
3998 case ATH11K_SCAN_STARTING
:
3999 ath11k_warn(ar
->ab
, "received scan bss chan event in an invalid scan state: %s (%d)\n",
4000 ath11k_scan_state_str(ar
->scan
.state
),
4003 case ATH11K_SCAN_RUNNING
:
4004 case ATH11K_SCAN_ABORTING
:
4005 ar
->scan_channel
= NULL
;
4010 static void ath11k_wmi_event_scan_foreign_chan(struct ath11k
*ar
, u32 freq
)
4012 lockdep_assert_held(&ar
->data_lock
);
4014 switch (ar
->scan
.state
) {
4015 case ATH11K_SCAN_IDLE
:
4016 case ATH11K_SCAN_STARTING
:
4017 ath11k_warn(ar
->ab
, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
4018 ath11k_scan_state_str(ar
->scan
.state
),
4021 case ATH11K_SCAN_RUNNING
:
4022 case ATH11K_SCAN_ABORTING
:
4023 ar
->scan_channel
= ieee80211_get_channel(ar
->hw
->wiphy
, freq
);
4029 ath11k_wmi_event_scan_type_str(enum wmi_scan_event_type type
,
4030 enum wmi_scan_completion_reason reason
)
4033 case WMI_SCAN_EVENT_STARTED
:
4035 case WMI_SCAN_EVENT_COMPLETED
:
4037 case WMI_SCAN_REASON_COMPLETED
:
4039 case WMI_SCAN_REASON_CANCELLED
:
4040 return "completed [cancelled]";
4041 case WMI_SCAN_REASON_PREEMPTED
:
4042 return "completed [preempted]";
4043 case WMI_SCAN_REASON_TIMEDOUT
:
4044 return "completed [timedout]";
4045 case WMI_SCAN_REASON_INTERNAL_FAILURE
:
4046 return "completed [internal err]";
4047 case WMI_SCAN_REASON_MAX
:
4050 return "completed [unknown]";
4051 case WMI_SCAN_EVENT_BSS_CHANNEL
:
4052 return "bss channel";
4053 case WMI_SCAN_EVENT_FOREIGN_CHAN
:
4054 return "foreign channel";
4055 case WMI_SCAN_EVENT_DEQUEUED
:
4057 case WMI_SCAN_EVENT_PREEMPTED
:
4059 case WMI_SCAN_EVENT_START_FAILED
:
4060 return "start failed";
4061 case WMI_SCAN_EVENT_RESTARTED
:
4063 case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT
:
4064 return "foreign channel exit";
4070 static int ath11k_pull_scan_ev(struct ath11k_base
*ab
, struct sk_buff
*skb
,
4071 struct wmi_scan_event
*scan_evt_param
)
4074 const struct wmi_scan_event
*ev
;
4077 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
4080 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4084 ev
= tb
[WMI_TAG_SCAN_EVENT
];
4086 ath11k_warn(ab
, "failed to fetch scan ev");
4091 scan_evt_param
->event_type
= ev
->event_type
;
4092 scan_evt_param
->reason
= ev
->reason
;
4093 scan_evt_param
->channel_freq
= ev
->channel_freq
;
4094 scan_evt_param
->scan_req_id
= ev
->scan_req_id
;
4095 scan_evt_param
->scan_id
= ev
->scan_id
;
4096 scan_evt_param
->vdev_id
= ev
->vdev_id
;
4097 scan_evt_param
->tsf_timestamp
= ev
->tsf_timestamp
;
4103 static int ath11k_pull_peer_sta_kickout_ev(struct ath11k_base
*ab
, struct sk_buff
*skb
,
4104 struct wmi_peer_sta_kickout_arg
*arg
)
4107 const struct wmi_peer_sta_kickout_event
*ev
;
4110 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
4113 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4117 ev
= tb
[WMI_TAG_PEER_STA_KICKOUT_EVENT
];
4119 ath11k_warn(ab
, "failed to fetch peer sta kickout ev");
4124 arg
->mac_addr
= ev
->peer_macaddr
.addr
;
4130 static int ath11k_pull_roam_ev(struct ath11k_base
*ab
, struct sk_buff
*skb
,
4131 struct wmi_roam_event
*roam_ev
)
4134 const struct wmi_roam_event
*ev
;
4137 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
4140 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4144 ev
= tb
[WMI_TAG_ROAM_EVENT
];
4146 ath11k_warn(ab
, "failed to fetch roam ev");
4151 roam_ev
->vdev_id
= ev
->vdev_id
;
4152 roam_ev
->reason
= ev
->reason
;
4153 roam_ev
->rssi
= ev
->rssi
;
4159 static int freq_to_idx(struct ath11k
*ar
, int freq
)
4161 struct ieee80211_supported_band
*sband
;
4162 int band
, ch
, idx
= 0;
4164 for (band
= NL80211_BAND_2GHZ
; band
< NUM_NL80211_BANDS
; band
++) {
4165 sband
= ar
->hw
->wiphy
->bands
[band
];
4169 for (ch
= 0; ch
< sband
->n_channels
; ch
++, idx
++)
4170 if (sband
->channels
[ch
].center_freq
== freq
)
4178 static int ath11k_pull_chan_info_ev(struct ath11k_base
*ab
, u8
*evt_buf
,
4179 u32 len
, struct wmi_chan_info_event
*ch_info_ev
)
4182 const struct wmi_chan_info_event
*ev
;
4185 tb
= ath11k_wmi_tlv_parse_alloc(ab
, evt_buf
, len
, GFP_ATOMIC
);
4188 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4192 ev
= tb
[WMI_TAG_CHAN_INFO_EVENT
];
4194 ath11k_warn(ab
, "failed to fetch chan info ev");
4199 ch_info_ev
->err_code
= ev
->err_code
;
4200 ch_info_ev
->freq
= ev
->freq
;
4201 ch_info_ev
->cmd_flags
= ev
->cmd_flags
;
4202 ch_info_ev
->noise_floor
= ev
->noise_floor
;
4203 ch_info_ev
->rx_clear_count
= ev
->rx_clear_count
;
4204 ch_info_ev
->cycle_count
= ev
->cycle_count
;
4205 ch_info_ev
->chan_tx_pwr_range
= ev
->chan_tx_pwr_range
;
4206 ch_info_ev
->chan_tx_pwr_tp
= ev
->chan_tx_pwr_tp
;
4207 ch_info_ev
->rx_frame_count
= ev
->rx_frame_count
;
4208 ch_info_ev
->tx_frame_cnt
= ev
->tx_frame_cnt
;
4209 ch_info_ev
->mac_clk_mhz
= ev
->mac_clk_mhz
;
4210 ch_info_ev
->vdev_id
= ev
->vdev_id
;
4217 ath11k_pull_pdev_bss_chan_info_ev(struct ath11k_base
*ab
, struct sk_buff
*skb
,
4218 struct wmi_pdev_bss_chan_info_event
*bss_ch_info_ev
)
4221 const struct wmi_pdev_bss_chan_info_event
*ev
;
4224 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
4227 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4231 ev
= tb
[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT
];
4233 ath11k_warn(ab
, "failed to fetch pdev bss chan info ev");
4238 bss_ch_info_ev
->pdev_id
= ev
->pdev_id
;
4239 bss_ch_info_ev
->freq
= ev
->freq
;
4240 bss_ch_info_ev
->noise_floor
= ev
->noise_floor
;
4241 bss_ch_info_ev
->rx_clear_count_low
= ev
->rx_clear_count_low
;
4242 bss_ch_info_ev
->rx_clear_count_high
= ev
->rx_clear_count_high
;
4243 bss_ch_info_ev
->cycle_count_low
= ev
->cycle_count_low
;
4244 bss_ch_info_ev
->cycle_count_high
= ev
->cycle_count_high
;
4245 bss_ch_info_ev
->tx_cycle_count_low
= ev
->tx_cycle_count_low
;
4246 bss_ch_info_ev
->tx_cycle_count_high
= ev
->tx_cycle_count_high
;
4247 bss_ch_info_ev
->rx_cycle_count_low
= ev
->rx_cycle_count_low
;
4248 bss_ch_info_ev
->rx_cycle_count_high
= ev
->rx_cycle_count_high
;
4249 bss_ch_info_ev
->rx_bss_cycle_count_low
= ev
->rx_bss_cycle_count_low
;
4250 bss_ch_info_ev
->rx_bss_cycle_count_high
= ev
->rx_bss_cycle_count_high
;
4257 ath11k_pull_vdev_install_key_compl_ev(struct ath11k_base
*ab
, struct sk_buff
*skb
,
4258 struct wmi_vdev_install_key_complete_arg
*arg
)
4261 const struct wmi_vdev_install_key_compl_event
*ev
;
4264 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
4267 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4271 ev
= tb
[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT
];
4273 ath11k_warn(ab
, "failed to fetch vdev install key compl ev");
4278 arg
->vdev_id
= ev
->vdev_id
;
4279 arg
->macaddr
= ev
->peer_macaddr
.addr
;
4280 arg
->key_idx
= ev
->key_idx
;
4281 arg
->key_flags
= ev
->key_flags
;
4282 arg
->status
= ev
->status
;
4288 static int ath11k_pull_peer_assoc_conf_ev(struct ath11k_base
*ab
, struct sk_buff
*skb
,
4289 struct wmi_peer_assoc_conf_arg
*peer_assoc_conf
)
4292 const struct wmi_peer_assoc_conf_event
*ev
;
4295 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
4298 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4302 ev
= tb
[WMI_TAG_PEER_ASSOC_CONF_EVENT
];
4304 ath11k_warn(ab
, "failed to fetch peer assoc conf ev");
4309 peer_assoc_conf
->vdev_id
= ev
->vdev_id
;
4310 peer_assoc_conf
->macaddr
= ev
->peer_macaddr
.addr
;
4316 static void ath11k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base
*src
,
4317 struct ath11k_fw_stats_pdev
*dst
)
4319 dst
->ch_noise_floor
= src
->chan_nf
;
4320 dst
->tx_frame_count
= src
->tx_frame_count
;
4321 dst
->rx_frame_count
= src
->rx_frame_count
;
4322 dst
->rx_clear_count
= src
->rx_clear_count
;
4323 dst
->cycle_count
= src
->cycle_count
;
4324 dst
->phy_err_count
= src
->phy_err_count
;
4325 dst
->chan_tx_power
= src
->chan_tx_pwr
;
4329 ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx
*src
,
4330 struct ath11k_fw_stats_pdev
*dst
)
4332 dst
->comp_queued
= src
->comp_queued
;
4333 dst
->comp_delivered
= src
->comp_delivered
;
4334 dst
->msdu_enqued
= src
->msdu_enqued
;
4335 dst
->mpdu_enqued
= src
->mpdu_enqued
;
4336 dst
->wmm_drop
= src
->wmm_drop
;
4337 dst
->local_enqued
= src
->local_enqued
;
4338 dst
->local_freed
= src
->local_freed
;
4339 dst
->hw_queued
= src
->hw_queued
;
4340 dst
->hw_reaped
= src
->hw_reaped
;
4341 dst
->underrun
= src
->underrun
;
4342 dst
->tx_abort
= src
->tx_abort
;
4343 dst
->mpdus_requed
= src
->mpdus_requed
;
4344 dst
->tx_ko
= src
->tx_ko
;
4345 dst
->data_rc
= src
->data_rc
;
4346 dst
->self_triggers
= src
->self_triggers
;
4347 dst
->sw_retry_failure
= src
->sw_retry_failure
;
4348 dst
->illgl_rate_phy_err
= src
->illgl_rate_phy_err
;
4349 dst
->pdev_cont_xretry
= src
->pdev_cont_xretry
;
4350 dst
->pdev_tx_timeout
= src
->pdev_tx_timeout
;
4351 dst
->pdev_resets
= src
->pdev_resets
;
4352 dst
->stateless_tid_alloc_failure
= src
->stateless_tid_alloc_failure
;
4353 dst
->phy_underrun
= src
->phy_underrun
;
4354 dst
->txop_ovf
= src
->txop_ovf
;
4357 static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx
*src
,
4358 struct ath11k_fw_stats_pdev
*dst
)
4360 dst
->mid_ppdu_route_change
= src
->mid_ppdu_route_change
;
4361 dst
->status_rcvd
= src
->status_rcvd
;
4362 dst
->r0_frags
= src
->r0_frags
;
4363 dst
->r1_frags
= src
->r1_frags
;
4364 dst
->r2_frags
= src
->r2_frags
;
4365 dst
->r3_frags
= src
->r3_frags
;
4366 dst
->htt_msdus
= src
->htt_msdus
;
4367 dst
->htt_mpdus
= src
->htt_mpdus
;
4368 dst
->loc_msdus
= src
->loc_msdus
;
4369 dst
->loc_mpdus
= src
->loc_mpdus
;
4370 dst
->oversize_amsdu
= src
->oversize_amsdu
;
4371 dst
->phy_errs
= src
->phy_errs
;
4372 dst
->phy_err_drop
= src
->phy_err_drop
;
4373 dst
->mpdu_errs
= src
->mpdu_errs
;
4377 ath11k_wmi_pull_vdev_stats(const struct wmi_vdev_stats
*src
,
4378 struct ath11k_fw_stats_vdev
*dst
)
4382 dst
->vdev_id
= src
->vdev_id
;
4383 dst
->beacon_snr
= src
->beacon_snr
;
4384 dst
->data_snr
= src
->data_snr
;
4385 dst
->num_rx_frames
= src
->num_rx_frames
;
4386 dst
->num_rts_fail
= src
->num_rts_fail
;
4387 dst
->num_rts_success
= src
->num_rts_success
;
4388 dst
->num_rx_err
= src
->num_rx_err
;
4389 dst
->num_rx_discard
= src
->num_rx_discard
;
4390 dst
->num_tx_not_acked
= src
->num_tx_not_acked
;
4392 for (i
= 0; i
< ARRAY_SIZE(src
->num_tx_frames
); i
++)
4393 dst
->num_tx_frames
[i
] = src
->num_tx_frames
[i
];
4395 for (i
= 0; i
< ARRAY_SIZE(src
->num_tx_frames_retries
); i
++)
4396 dst
->num_tx_frames_retries
[i
] = src
->num_tx_frames_retries
[i
];
4398 for (i
= 0; i
< ARRAY_SIZE(src
->num_tx_frames_failures
); i
++)
4399 dst
->num_tx_frames_failures
[i
] = src
->num_tx_frames_failures
[i
];
4401 for (i
= 0; i
< ARRAY_SIZE(src
->tx_rate_history
); i
++)
4402 dst
->tx_rate_history
[i
] = src
->tx_rate_history
[i
];
4404 for (i
= 0; i
< ARRAY_SIZE(src
->beacon_rssi_history
); i
++)
4405 dst
->beacon_rssi_history
[i
] = src
->beacon_rssi_history
[i
];
4409 ath11k_wmi_pull_bcn_stats(const struct wmi_bcn_stats
*src
,
4410 struct ath11k_fw_stats_bcn
*dst
)
4412 dst
->vdev_id
= src
->vdev_id
;
4413 dst
->tx_bcn_succ_cnt
= src
->tx_bcn_succ_cnt
;
4414 dst
->tx_bcn_outage_cnt
= src
->tx_bcn_outage_cnt
;
4417 int ath11k_wmi_pull_fw_stats(struct ath11k_base
*ab
, struct sk_buff
*skb
,
4418 struct ath11k_fw_stats
*stats
)
4421 const struct wmi_stats_event
*ev
;
4426 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, len
, GFP_ATOMIC
);
4429 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4433 ev
= tb
[WMI_TAG_STATS_EVENT
];
4434 data
= tb
[WMI_TAG_ARRAY_BYTE
];
4436 ath11k_warn(ab
, "failed to fetch update stats ev");
4441 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
4442 "wmi stats update ev pdev_id %d pdev %i vdev %i bcn %i\n",
4444 ev
->num_pdev_stats
, ev
->num_vdev_stats
,
4447 stats
->pdev_id
= ev
->pdev_id
;
4448 stats
->stats_id
= 0;
4450 for (i
= 0; i
< ev
->num_pdev_stats
; i
++) {
4451 const struct wmi_pdev_stats
*src
;
4452 struct ath11k_fw_stats_pdev
*dst
;
4455 if (len
< sizeof(*src
)) {
4460 stats
->stats_id
= WMI_REQUEST_PDEV_STAT
;
4462 data
+= sizeof(*src
);
4463 len
-= sizeof(*src
);
4465 dst
= kzalloc(sizeof(*dst
), GFP_ATOMIC
);
4469 ath11k_wmi_pull_pdev_stats_base(&src
->base
, dst
);
4470 ath11k_wmi_pull_pdev_stats_tx(&src
->tx
, dst
);
4471 ath11k_wmi_pull_pdev_stats_rx(&src
->rx
, dst
);
4472 list_add_tail(&dst
->list
, &stats
->pdevs
);
4475 for (i
= 0; i
< ev
->num_vdev_stats
; i
++) {
4476 const struct wmi_vdev_stats
*src
;
4477 struct ath11k_fw_stats_vdev
*dst
;
4480 if (len
< sizeof(*src
)) {
4485 stats
->stats_id
= WMI_REQUEST_VDEV_STAT
;
4487 data
+= sizeof(*src
);
4488 len
-= sizeof(*src
);
4490 dst
= kzalloc(sizeof(*dst
), GFP_ATOMIC
);
4494 ath11k_wmi_pull_vdev_stats(src
, dst
);
4495 list_add_tail(&dst
->list
, &stats
->vdevs
);
4498 for (i
= 0; i
< ev
->num_bcn_stats
; i
++) {
4499 const struct wmi_bcn_stats
*src
;
4500 struct ath11k_fw_stats_bcn
*dst
;
4503 if (len
< sizeof(*src
)) {
4508 stats
->stats_id
= WMI_REQUEST_BCN_STAT
;
4510 data
+= sizeof(*src
);
4511 len
-= sizeof(*src
);
4513 dst
= kzalloc(sizeof(*dst
), GFP_ATOMIC
);
4517 ath11k_wmi_pull_bcn_stats(src
, dst
);
4518 list_add_tail(&dst
->list
, &stats
->bcn
);
4526 ath11k_pull_pdev_temp_ev(struct ath11k_base
*ab
, u8
*evt_buf
,
4527 u32 len
, const struct wmi_pdev_temperature_event
*ev
)
4532 tb
= ath11k_wmi_tlv_parse_alloc(ab
, evt_buf
, len
, GFP_ATOMIC
);
4535 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
4539 ev
= tb
[WMI_TAG_PDEV_TEMPERATURE_EVENT
];
4541 ath11k_warn(ab
, "failed to fetch pdev temp ev");
4550 size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head
*head
)
4552 struct ath11k_fw_stats_vdev
*i
;
4555 list_for_each_entry(i
, head
, list
)
4561 static size_t ath11k_wmi_fw_stats_num_bcn(struct list_head
*head
)
4563 struct ath11k_fw_stats_bcn
*i
;
4566 list_for_each_entry(i
, head
, list
)
4573 ath11k_wmi_fw_pdev_base_stats_fill(const struct ath11k_fw_stats_pdev
*pdev
,
4574 char *buf
, u32
*length
)
4577 u32 buf_len
= ATH11K_FW_STATS_BUF_SIZE
;
4579 len
+= scnprintf(buf
+ len
, buf_len
- len
, "\n");
4580 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s\n",
4581 "ath11k PDEV stats");
4582 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s\n\n",
4583 "=================");
4585 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4586 "Channel noise floor", pdev
->ch_noise_floor
);
4587 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
4588 "Channel TX power", pdev
->chan_tx_power
);
4589 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
4590 "TX frame count", pdev
->tx_frame_count
);
4591 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
4592 "RX frame count", pdev
->rx_frame_count
);
4593 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
4594 "RX clear count", pdev
->rx_clear_count
);
4595 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
4596 "Cycle count", pdev
->cycle_count
);
4597 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
4598 "PHY error count", pdev
->phy_err_count
);
4604 ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev
*pdev
,
4605 char *buf
, u32
*length
)
4608 u32 buf_len
= ATH11K_FW_STATS_BUF_SIZE
;
4610 len
+= scnprintf(buf
+ len
, buf_len
- len
, "\n%30s\n",
4611 "ath11k PDEV TX stats");
4612 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s\n\n",
4613 "====================");
4615 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4616 "HTT cookies queued", pdev
->comp_queued
);
4617 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4618 "HTT cookies disp.", pdev
->comp_delivered
);
4619 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4620 "MSDU queued", pdev
->msdu_enqued
);
4621 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4622 "MPDU queued", pdev
->mpdu_enqued
);
4623 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4624 "MSDUs dropped", pdev
->wmm_drop
);
4625 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4626 "Local enqued", pdev
->local_enqued
);
4627 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4628 "Local freed", pdev
->local_freed
);
4629 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4630 "HW queued", pdev
->hw_queued
);
4631 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4632 "PPDUs reaped", pdev
->hw_reaped
);
4633 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4634 "Num underruns", pdev
->underrun
);
4635 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4636 "PPDUs cleaned", pdev
->tx_abort
);
4637 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4638 "MPDUs requed", pdev
->mpdus_requed
);
4639 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
4640 "Excessive retries", pdev
->tx_ko
);
4641 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
4642 "HW rate", pdev
->data_rc
);
4643 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
4644 "Sched self triggers", pdev
->self_triggers
);
4645 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
4646 "Dropped due to SW retries",
4647 pdev
->sw_retry_failure
);
4648 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
4649 "Illegal rate phy errors",
4650 pdev
->illgl_rate_phy_err
);
4651 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
4652 "PDEV continuous xretry", pdev
->pdev_cont_xretry
);
4653 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
4654 "TX timeout", pdev
->pdev_tx_timeout
);
4655 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
4656 "PDEV resets", pdev
->pdev_resets
);
4657 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
4658 "Stateless TIDs alloc failures",
4659 pdev
->stateless_tid_alloc_failure
);
4660 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
4661 "PHY underrun", pdev
->phy_underrun
);
4662 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10u\n",
4663 "MPDU is more than txop limit", pdev
->txop_ovf
);
4668 ath11k_wmi_fw_pdev_rx_stats_fill(const struct ath11k_fw_stats_pdev
*pdev
,
4669 char *buf
, u32
*length
)
4672 u32 buf_len
= ATH11K_FW_STATS_BUF_SIZE
;
4674 len
+= scnprintf(buf
+ len
, buf_len
- len
, "\n%30s\n",
4675 "ath11k PDEV RX stats");
4676 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s\n\n",
4677 "====================");
4679 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4680 "Mid PPDU route change",
4681 pdev
->mid_ppdu_route_change
);
4682 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4683 "Tot. number of statuses", pdev
->status_rcvd
);
4684 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4685 "Extra frags on rings 0", pdev
->r0_frags
);
4686 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4687 "Extra frags on rings 1", pdev
->r1_frags
);
4688 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4689 "Extra frags on rings 2", pdev
->r2_frags
);
4690 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4691 "Extra frags on rings 3", pdev
->r3_frags
);
4692 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4693 "MSDUs delivered to HTT", pdev
->htt_msdus
);
4694 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4695 "MPDUs delivered to HTT", pdev
->htt_mpdus
);
4696 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4697 "MSDUs delivered to stack", pdev
->loc_msdus
);
4698 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4699 "MPDUs delivered to stack", pdev
->loc_mpdus
);
4700 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4701 "Oversized AMSUs", pdev
->oversize_amsdu
);
4702 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4703 "PHY errors", pdev
->phy_errs
);
4704 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4705 "PHY errors drops", pdev
->phy_err_drop
);
4706 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %10d\n",
4707 "MPDU errors (FCS, MIC, ENC)", pdev
->mpdu_errs
);
4712 ath11k_wmi_fw_vdev_stats_fill(struct ath11k
*ar
,
4713 const struct ath11k_fw_stats_vdev
*vdev
,
4714 char *buf
, u32
*length
)
4717 u32 buf_len
= ATH11K_FW_STATS_BUF_SIZE
;
4718 struct ath11k_vif
*arvif
= ath11k_mac_get_arvif(ar
, vdev
->vdev_id
);
4722 /* VDEV stats has all the active VDEVs of other PDEVs as well,
4723 * ignoring those not part of requested PDEV
4728 vif_macaddr
= arvif
->vif
->addr
;
4730 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
4731 "VDEV ID", vdev
->vdev_id
);
4732 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %pM\n",
4733 "VDEV MAC address", vif_macaddr
);
4734 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
4735 "beacon snr", vdev
->beacon_snr
);
4736 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
4737 "data snr", vdev
->data_snr
);
4738 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
4739 "num rx frames", vdev
->num_rx_frames
);
4740 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
4741 "num rts fail", vdev
->num_rts_fail
);
4742 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
4743 "num rts success", vdev
->num_rts_success
);
4744 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
4745 "num rx err", vdev
->num_rx_err
);
4746 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
4747 "num rx discard", vdev
->num_rx_discard
);
4748 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
4749 "num tx not acked", vdev
->num_tx_not_acked
);
4751 for (i
= 0 ; i
< ARRAY_SIZE(vdev
->num_tx_frames
); i
++)
4752 len
+= scnprintf(buf
+ len
, buf_len
- len
,
4755 vdev
->num_tx_frames
[i
]);
4757 for (i
= 0 ; i
< ARRAY_SIZE(vdev
->num_tx_frames_retries
); i
++)
4758 len
+= scnprintf(buf
+ len
, buf_len
- len
,
4760 "num tx frames retries", i
,
4761 vdev
->num_tx_frames_retries
[i
]);
4763 for (i
= 0 ; i
< ARRAY_SIZE(vdev
->num_tx_frames_failures
); i
++)
4764 len
+= scnprintf(buf
+ len
, buf_len
- len
,
4766 "num tx frames failures", i
,
4767 vdev
->num_tx_frames_failures
[i
]);
4769 for (i
= 0 ; i
< ARRAY_SIZE(vdev
->tx_rate_history
); i
++)
4770 len
+= scnprintf(buf
+ len
, buf_len
- len
,
4771 "%25s [%02d] 0x%08x\n",
4772 "tx rate history", i
,
4773 vdev
->tx_rate_history
[i
]);
4775 for (i
= 0 ; i
< ARRAY_SIZE(vdev
->beacon_rssi_history
); i
++)
4776 len
+= scnprintf(buf
+ len
, buf_len
- len
,
4778 "beacon rssi history", i
,
4779 vdev
->beacon_rssi_history
[i
]);
4781 len
+= scnprintf(buf
+ len
, buf_len
- len
, "\n");
4786 ath11k_wmi_fw_bcn_stats_fill(struct ath11k
*ar
,
4787 const struct ath11k_fw_stats_bcn
*bcn
,
4788 char *buf
, u32
*length
)
4791 u32 buf_len
= ATH11K_FW_STATS_BUF_SIZE
;
4792 struct ath11k_vif
*arvif
= ath11k_mac_get_arvif(ar
, bcn
->vdev_id
);
4796 ath11k_warn(ar
->ab
, "invalid vdev id %d in bcn stats",
4801 vdev_macaddr
= arvif
->vif
->addr
;
4803 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
4804 "VDEV ID", bcn
->vdev_id
);
4805 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %pM\n",
4806 "VDEV MAC address", vdev_macaddr
);
4807 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s\n\n",
4808 "================");
4809 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
4810 "Num of beacon tx success", bcn
->tx_bcn_succ_cnt
);
4811 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s %u\n",
4812 "Num of beacon tx failures", bcn
->tx_bcn_outage_cnt
);
4814 len
+= scnprintf(buf
+ len
, buf_len
- len
, "\n");
4818 void ath11k_wmi_fw_stats_fill(struct ath11k
*ar
,
4819 struct ath11k_fw_stats
*fw_stats
,
4820 u32 stats_id
, char *buf
)
4823 u32 buf_len
= ATH11K_FW_STATS_BUF_SIZE
;
4824 const struct ath11k_fw_stats_pdev
*pdev
;
4825 const struct ath11k_fw_stats_vdev
*vdev
;
4826 const struct ath11k_fw_stats_bcn
*bcn
;
4829 spin_lock_bh(&ar
->data_lock
);
4831 if (stats_id
== WMI_REQUEST_PDEV_STAT
) {
4832 pdev
= list_first_entry_or_null(&fw_stats
->pdevs
,
4833 struct ath11k_fw_stats_pdev
, list
);
4835 ath11k_warn(ar
->ab
, "failed to get pdev stats\n");
4839 ath11k_wmi_fw_pdev_base_stats_fill(pdev
, buf
, &len
);
4840 ath11k_wmi_fw_pdev_tx_stats_fill(pdev
, buf
, &len
);
4841 ath11k_wmi_fw_pdev_rx_stats_fill(pdev
, buf
, &len
);
4844 if (stats_id
== WMI_REQUEST_VDEV_STAT
) {
4845 len
+= scnprintf(buf
+ len
, buf_len
- len
, "\n");
4846 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s\n",
4847 "ath11k VDEV stats");
4848 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s\n\n",
4849 "=================");
4851 list_for_each_entry(vdev
, &fw_stats
->vdevs
, list
)
4852 ath11k_wmi_fw_vdev_stats_fill(ar
, vdev
, buf
, &len
);
4855 if (stats_id
== WMI_REQUEST_BCN_STAT
) {
4856 num_bcn
= ath11k_wmi_fw_stats_num_bcn(&fw_stats
->bcn
);
4858 len
+= scnprintf(buf
+ len
, buf_len
- len
, "\n");
4859 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s (%zu)\n",
4860 "ath11k Beacon stats", num_bcn
);
4861 len
+= scnprintf(buf
+ len
, buf_len
- len
, "%30s\n\n",
4862 "===================");
4864 list_for_each_entry(bcn
, &fw_stats
->bcn
, list
)
4865 ath11k_wmi_fw_bcn_stats_fill(ar
, bcn
, buf
, &len
);
4869 spin_unlock_bh(&ar
->data_lock
);
4877 static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base
*ab
)
4879 /* try to send pending beacons first. they take priority */
4880 wake_up(&ab
->wmi_ab
.tx_credits_wq
);
/* HTC TX-completion callback for WMI control packets: the skb is no longer
 * needed once HTC has consumed it, so just free it.
 */
static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab,
				       struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}
/* Return true if @alpha is the "00" world-roaming regulatory alpha2 code.
 * @alpha must point to at least two characters.
 */
static bool ath11k_reg_is_world_alpha(char *alpha)
{
	return alpha[0] == '0' && alpha[1] == '0';
}
4894 static int ath11k_reg_chan_list_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
4896 struct cur_regulatory_info
*reg_info
= NULL
;
4897 struct ieee80211_regdomain
*regd
= NULL
;
4898 bool intersect
= false;
4899 int ret
= 0, pdev_idx
;
4902 reg_info
= kzalloc(sizeof(*reg_info
), GFP_ATOMIC
);
4908 ret
= ath11k_pull_reg_chan_list_update_ev(ab
, skb
, reg_info
);
4910 ath11k_warn(ab
, "failed to extract regulatory info from received event\n");
4914 if (reg_info
->status_code
!= REG_SET_CC_STATUS_PASS
) {
4915 /* In case of failure to set the requested ctry,
4916 * fw retains the current regd. We print a failure info
4917 * and return from here.
4919 ath11k_warn(ab
, "Failed to set the requested Country regulatory setting\n");
4923 pdev_idx
= reg_info
->phy_id
;
4925 if (pdev_idx
>= ab
->num_radios
)
4928 /* Avoid multiple overwrites to default regd, during core
4929 * stop-start after mac registration.
4931 if (ab
->default_regd
[pdev_idx
] && !ab
->new_regd
[pdev_idx
] &&
4932 !memcmp((char *)ab
->default_regd
[pdev_idx
]->alpha2
,
4933 (char *)reg_info
->alpha2
, 2))
4936 /* Intersect new rules with default regd if a new country setting was
4937 * requested, i.e a default regd was already set during initialization
4938 * and the regd coming from this event has a valid country info.
4940 if (ab
->default_regd
[pdev_idx
] &&
4941 !ath11k_reg_is_world_alpha((char *)
4942 ab
->default_regd
[pdev_idx
]->alpha2
) &&
4943 !ath11k_reg_is_world_alpha((char *)reg_info
->alpha2
))
4946 regd
= ath11k_reg_build_regd(ab
, reg_info
, intersect
);
4948 ath11k_warn(ab
, "failed to build regd from reg_info\n");
4952 spin_lock(&ab
->base_lock
);
4953 if (test_bit(ATH11K_FLAG_REGISTERED
, &ab
->dev_flags
)) {
4954 /* Once mac is registered, ar is valid and all CC events from
4955 * fw is considered to be received due to user requests
4957 * Free previously built regd before assigning the newly
4958 * generated regd to ar. NULL pointer handling will be
4959 * taken care by kfree itself.
4961 ar
= ab
->pdevs
[pdev_idx
].ar
;
4962 kfree(ab
->new_regd
[pdev_idx
]);
4963 ab
->new_regd
[pdev_idx
] = regd
;
4964 ieee80211_queue_work(ar
->hw
, &ar
->regd_update_work
);
4966 /* Multiple events for the same *ar is not expected. But we
4967 * can still clear any previously stored default_regd if we
4968 * are receiving this event for the same radio by mistake.
4969 * NULL pointer handling will be taken care by kfree itself.
4971 kfree(ab
->default_regd
[pdev_idx
]);
4972 /* This regd would be applied during mac registration */
4973 ab
->default_regd
[pdev_idx
] = regd
;
4975 ab
->dfs_region
= reg_info
->dfs_region
;
4976 spin_unlock(&ab
->base_lock
);
4981 /* Fallback to older reg (by sending previous country setting
4982 * again if fw has succeded and we failed to process here.
4983 * The Regdomain should be uniform across driver and fw. Since the
4984 * FW has processed the command and sent a success status, we expect
4985 * this function to succeed as well. If it doesn't, CTRY needs to be
4986 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
4988 /* TODO: This is rare, but still should also be handled */
4992 kfree(reg_info
->reg_rules_2g_ptr
);
4993 kfree(reg_info
->reg_rules_5g_ptr
);
4999 static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base
*ab
, u16 tag
, u16 len
,
5000 const void *ptr
, void *data
)
5002 struct wmi_tlv_rdy_parse
*rdy_parse
= data
;
5003 struct wmi_ready_event fixed_param
;
5004 struct wmi_mac_addr
*addr_list
;
5005 struct ath11k_pdev
*pdev
;
5010 case WMI_TAG_READY_EVENT
:
5011 memset(&fixed_param
, 0, sizeof(fixed_param
));
5012 memcpy(&fixed_param
, (struct wmi_ready_event
*)ptr
,
5013 min_t(u16
, sizeof(fixed_param
), len
));
5014 ab
->wlan_init_status
= fixed_param
.ready_event_min
.status
;
5015 rdy_parse
->num_extra_mac_addr
=
5016 fixed_param
.ready_event_min
.num_extra_mac_addr
;
5018 ether_addr_copy(ab
->mac_addr
,
5019 fixed_param
.ready_event_min
.mac_addr
.addr
);
5020 ab
->pktlog_defs_checksum
= fixed_param
.pktlog_defs_checksum
;
5021 ab
->wmi_ready
= true;
5023 case WMI_TAG_ARRAY_FIXED_STRUCT
:
5024 addr_list
= (struct wmi_mac_addr
*)ptr
;
5025 num_mac_addr
= rdy_parse
->num_extra_mac_addr
;
5027 if (!(ab
->num_radios
> 1 && num_mac_addr
>= ab
->num_radios
))
5030 for (i
= 0; i
< ab
->num_radios
; i
++) {
5031 pdev
= &ab
->pdevs
[i
];
5032 ether_addr_copy(pdev
->mac_addr
, addr_list
[i
].addr
);
5034 ab
->pdevs_macaddr_valid
= true;
5043 static int ath11k_ready_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5045 struct wmi_tlv_rdy_parse rdy_parse
= { };
5048 ret
= ath11k_wmi_tlv_iter(ab
, skb
->data
, skb
->len
,
5049 ath11k_wmi_tlv_rdy_parse
, &rdy_parse
);
5051 ath11k_warn(ab
, "failed to parse tlv %d\n", ret
);
5055 complete(&ab
->wmi_ab
.unified_ready
);
5059 static void ath11k_peer_delete_resp_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5061 struct wmi_peer_delete_resp_event peer_del_resp
;
5063 if (ath11k_pull_peer_del_resp_ev(ab
, skb
, &peer_del_resp
) != 0) {
5064 ath11k_warn(ab
, "failed to extract peer delete resp");
5068 /* TODO: Do we need to validate whether ath11k_peer_find() return NULL
5069 * Why this is needed when there is HTT event for peer delete
5073 static inline const char *ath11k_wmi_vdev_resp_print(u32 vdev_resp_status
)
5075 switch (vdev_resp_status
) {
5076 case WMI_VDEV_START_RESPONSE_INVALID_VDEVID
:
5077 return "invalid vdev id";
5078 case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED
:
5079 return "not supported";
5080 case WMI_VDEV_START_RESPONSE_DFS_VIOLATION
:
5081 return "dfs violation";
5082 case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN
:
5083 return "invalid regdomain";
5089 static void ath11k_vdev_start_resp_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5091 struct wmi_vdev_start_resp_event vdev_start_resp
;
5095 if (ath11k_pull_vdev_start_resp_tlv(ab
, skb
, &vdev_start_resp
) != 0) {
5096 ath11k_warn(ab
, "failed to extract vdev start resp");
5101 ar
= ath11k_mac_get_ar_by_vdev_id(ab
, vdev_start_resp
.vdev_id
);
5103 ath11k_warn(ab
, "invalid vdev id in vdev start resp ev %d",
5104 vdev_start_resp
.vdev_id
);
5109 ar
->last_wmi_vdev_start_status
= 0;
5111 status
= vdev_start_resp
.status
;
5113 if (WARN_ON_ONCE(status
)) {
5114 ath11k_warn(ab
, "vdev start resp error status %d (%s)\n",
5115 status
, ath11k_wmi_vdev_resp_print(status
));
5116 ar
->last_wmi_vdev_start_status
= status
;
5119 complete(&ar
->vdev_setup_done
);
5123 ath11k_dbg(ab
, ATH11K_DBG_WMI
, "vdev start resp for vdev id %d",
5124 vdev_start_resp
.vdev_id
);
5127 static void ath11k_bcn_tx_status_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5129 u32 vdev_id
, tx_status
;
5131 if (ath11k_pull_bcn_tx_status_ev(ab
, skb
->data
, skb
->len
,
5132 &vdev_id
, &tx_status
) != 0) {
5133 ath11k_warn(ab
, "failed to extract bcn tx status");
5138 static void ath11k_vdev_stopped_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5143 if (ath11k_pull_vdev_stopped_param_tlv(ab
, skb
, &vdev_id
) != 0) {
5144 ath11k_warn(ab
, "failed to extract vdev stopped event");
5149 ar
= ath11k_mac_get_ar_vdev_stop_status(ab
, vdev_id
);
5151 ath11k_warn(ab
, "invalid vdev id in vdev stopped ev %d",
5157 complete(&ar
->vdev_setup_done
);
5161 ath11k_dbg(ab
, ATH11K_DBG_WMI
, "vdev stopped for vdev id %d", vdev_id
);
5164 static void ath11k_mgmt_rx_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5166 struct mgmt_rx_event_params rx_ev
= {0};
5168 struct ieee80211_rx_status
*status
= IEEE80211_SKB_RXCB(skb
);
5169 struct ieee80211_hdr
*hdr
;
5171 struct ieee80211_supported_band
*sband
;
5173 if (ath11k_pull_mgmt_rx_params_tlv(ab
, skb
, &rx_ev
) != 0) {
5174 ath11k_warn(ab
, "failed to extract mgmt rx event");
5179 memset(status
, 0, sizeof(*status
));
5181 ath11k_dbg(ab
, ATH11K_DBG_MGMT
, "mgmt rx event status %08x\n",
5185 ar
= ath11k_mac_get_ar_by_pdev_id(ab
, rx_ev
.pdev_id
);
5188 ath11k_warn(ab
, "invalid pdev_id %d in mgmt_rx_event\n",
5194 if ((test_bit(ATH11K_CAC_RUNNING
, &ar
->dev_flags
)) ||
5195 (rx_ev
.status
& (WMI_RX_STATUS_ERR_DECRYPT
|
5196 WMI_RX_STATUS_ERR_KEY_CACHE_MISS
| WMI_RX_STATUS_ERR_CRC
))) {
5201 if (rx_ev
.status
& WMI_RX_STATUS_ERR_MIC
)
5202 status
->flag
|= RX_FLAG_MMIC_ERROR
;
5204 if (rx_ev
.channel
>= 1 && rx_ev
.channel
<= 14) {
5205 status
->band
= NL80211_BAND_2GHZ
;
5206 } else if (rx_ev
.channel
>= 36 && rx_ev
.channel
<= ATH11K_MAX_5G_CHAN
) {
5207 status
->band
= NL80211_BAND_5GHZ
;
5209 /* Shouldn't happen unless list of advertised channels to
5210 * mac80211 has been changed.
5217 if (rx_ev
.phy_mode
== MODE_11B
&& status
->band
== NL80211_BAND_5GHZ
)
5218 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
5219 "wmi mgmt rx 11b (CCK) on 5GHz\n");
5221 sband
= &ar
->mac
.sbands
[status
->band
];
5223 status
->freq
= ieee80211_channel_to_frequency(rx_ev
.channel
,
5225 status
->signal
= rx_ev
.snr
+ ATH11K_DEFAULT_NOISE_FLOOR
;
5226 status
->rate_idx
= ath11k_mac_bitrate_to_idx(sband
, rx_ev
.rate
/ 100);
5228 hdr
= (struct ieee80211_hdr
*)skb
->data
;
5229 fc
= le16_to_cpu(hdr
->frame_control
);
5231 /* Firmware is guaranteed to report all essential management frames via
5232 * WMI while it can deliver some extra via HTT. Since there can be
5233 * duplicates split the reporting wrt monitor/sniffing.
5235 status
->flag
|= RX_FLAG_SKIP_MONITOR
;
5237 /* In case of PMF, FW delivers decrypted frames with Protected Bit set.
5238 * Don't clear that. Also, FW delivers broadcast management frames
5239 * (ex: group privacy action frames in mesh) as encrypted payload.
5241 if (ieee80211_has_protected(hdr
->frame_control
) &&
5242 !is_multicast_ether_addr(ieee80211_get_DA(hdr
))) {
5243 status
->flag
|= RX_FLAG_DECRYPTED
;
5245 if (!ieee80211_is_robust_mgmt_frame(skb
)) {
5246 status
->flag
|= RX_FLAG_IV_STRIPPED
|
5247 RX_FLAG_MMIC_STRIPPED
;
5248 hdr
->frame_control
= __cpu_to_le16(fc
&
5249 ~IEEE80211_FCTL_PROTECTED
);
5253 /* TODO: Pending handle beacon implementation
5254 *if (ieee80211_is_beacon(hdr->frame_control))
5255 * ath11k_mac_handle_beacon(ar, skb);
5258 ath11k_dbg(ab
, ATH11K_DBG_MGMT
,
5259 "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
5261 fc
& IEEE80211_FCTL_FTYPE
, fc
& IEEE80211_FCTL_STYPE
);
5263 ath11k_dbg(ab
, ATH11K_DBG_MGMT
,
5264 "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
5265 status
->freq
, status
->band
, status
->signal
,
5268 ieee80211_rx_ni(ar
->hw
, skb
);
5274 static void ath11k_mgmt_tx_compl_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5276 struct wmi_mgmt_tx_compl_event tx_compl_param
= {0};
5279 if (ath11k_pull_mgmt_tx_compl_param_tlv(ab
, skb
, &tx_compl_param
) != 0) {
5280 ath11k_warn(ab
, "failed to extract mgmt tx compl event");
5285 ar
= ath11k_mac_get_ar_by_pdev_id(ab
, tx_compl_param
.pdev_id
);
5287 ath11k_warn(ab
, "invalid pdev id %d in mgmt_tx_compl_event\n",
5288 tx_compl_param
.pdev_id
);
5292 wmi_process_mgmt_tx_comp(ar
, tx_compl_param
.desc_id
,
5293 tx_compl_param
.status
);
5295 ath11k_dbg(ab
, ATH11K_DBG_MGMT
,
5296 "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
5297 tx_compl_param
.pdev_id
, tx_compl_param
.desc_id
,
5298 tx_compl_param
.status
);
5304 static struct ath11k
*ath11k_get_ar_on_scan_abort(struct ath11k_base
*ab
,
5308 struct ath11k_pdev
*pdev
;
5311 for (i
= 0; i
< ab
->num_radios
; i
++) {
5312 pdev
= rcu_dereference(ab
->pdevs_active
[i
]);
5313 if (pdev
&& pdev
->ar
) {
5316 spin_lock_bh(&ar
->data_lock
);
5317 if (ar
->scan
.state
== ATH11K_SCAN_ABORTING
&&
5318 ar
->scan
.vdev_id
== vdev_id
) {
5319 spin_unlock_bh(&ar
->data_lock
);
5322 spin_unlock_bh(&ar
->data_lock
);
5328 static void ath11k_scan_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5331 struct wmi_scan_event scan_ev
= {0};
5333 if (ath11k_pull_scan_ev(ab
, skb
, &scan_ev
) != 0) {
5334 ath11k_warn(ab
, "failed to extract scan event");
5340 /* In case the scan was cancelled, ex. during interface teardown,
5341 * the interface will not be found in active interfaces.
5342 * Rather, in such scenarios, iterate over the active pdev's to
5343 * search 'ar' if the corresponding 'ar' scan is ABORTING and the
5344 * aborting scan's vdev id matches this event info.
5346 if (scan_ev
.event_type
== WMI_SCAN_EVENT_COMPLETED
&&
5347 scan_ev
.reason
== WMI_SCAN_REASON_CANCELLED
)
5348 ar
= ath11k_get_ar_on_scan_abort(ab
, scan_ev
.vdev_id
);
5350 ar
= ath11k_mac_get_ar_by_vdev_id(ab
, scan_ev
.vdev_id
);
5353 ath11k_warn(ab
, "Received scan event for unknown vdev");
5358 spin_lock_bh(&ar
->data_lock
);
5360 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
5361 "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
5362 ath11k_wmi_event_scan_type_str(scan_ev
.event_type
, scan_ev
.reason
),
5363 scan_ev
.event_type
, scan_ev
.reason
, scan_ev
.channel_freq
,
5364 scan_ev
.scan_req_id
, scan_ev
.scan_id
, scan_ev
.vdev_id
,
5365 ath11k_scan_state_str(ar
->scan
.state
), ar
->scan
.state
);
5367 switch (scan_ev
.event_type
) {
5368 case WMI_SCAN_EVENT_STARTED
:
5369 ath11k_wmi_event_scan_started(ar
);
5371 case WMI_SCAN_EVENT_COMPLETED
:
5372 ath11k_wmi_event_scan_completed(ar
);
5374 case WMI_SCAN_EVENT_BSS_CHANNEL
:
5375 ath11k_wmi_event_scan_bss_chan(ar
);
5377 case WMI_SCAN_EVENT_FOREIGN_CHAN
:
5378 ath11k_wmi_event_scan_foreign_chan(ar
, scan_ev
.channel_freq
);
5380 case WMI_SCAN_EVENT_START_FAILED
:
5381 ath11k_warn(ab
, "received scan start failure event\n");
5382 ath11k_wmi_event_scan_start_failed(ar
);
5384 case WMI_SCAN_EVENT_DEQUEUED
:
5385 case WMI_SCAN_EVENT_PREEMPTED
:
5386 case WMI_SCAN_EVENT_RESTARTED
:
5387 case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT
:
5392 spin_unlock_bh(&ar
->data_lock
);
5397 static void ath11k_peer_sta_kickout_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5399 struct wmi_peer_sta_kickout_arg arg
= {};
5400 struct ieee80211_sta
*sta
;
5401 struct ath11k_peer
*peer
;
5404 if (ath11k_pull_peer_sta_kickout_ev(ab
, skb
, &arg
) != 0) {
5405 ath11k_warn(ab
, "failed to extract peer sta kickout event");
5411 spin_lock_bh(&ab
->base_lock
);
5413 peer
= ath11k_peer_find_by_addr(ab
, arg
.mac_addr
);
5416 ath11k_warn(ab
, "peer not found %pM\n",
5421 ar
= ath11k_mac_get_ar_by_vdev_id(ab
, peer
->vdev_id
);
5423 ath11k_warn(ab
, "invalid vdev id in peer sta kickout ev %d",
5428 sta
= ieee80211_find_sta_by_ifaddr(ar
->hw
,
5429 arg
.mac_addr
, NULL
);
5431 ath11k_warn(ab
, "Spurious quick kickout for STA %pM\n",
5436 ath11k_dbg(ab
, ATH11K_DBG_WMI
, "peer sta kickout event %pM",
5439 ieee80211_report_low_ack(sta
, 10);
5442 spin_unlock_bh(&ab
->base_lock
);
5446 static void ath11k_roam_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5448 struct wmi_roam_event roam_ev
= {};
5451 if (ath11k_pull_roam_ev(ab
, skb
, &roam_ev
) != 0) {
5452 ath11k_warn(ab
, "failed to extract roam event");
5456 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
5457 "wmi roam event vdev %u reason 0x%08x rssi %d\n",
5458 roam_ev
.vdev_id
, roam_ev
.reason
, roam_ev
.rssi
);
5461 ar
= ath11k_mac_get_ar_by_vdev_id(ab
, roam_ev
.vdev_id
);
5463 ath11k_warn(ab
, "invalid vdev id in roam ev %d",
5469 if (roam_ev
.reason
>= WMI_ROAM_REASON_MAX
)
5470 ath11k_warn(ab
, "ignoring unknown roam event reason %d on vdev %i\n",
5471 roam_ev
.reason
, roam_ev
.vdev_id
);
5473 switch (roam_ev
.reason
) {
5474 case WMI_ROAM_REASON_BEACON_MISS
:
5475 /* TODO: Pending beacon miss and connection_loss_work
5477 * ath11k_mac_handle_beacon_miss(ar, vdev_id);
5480 case WMI_ROAM_REASON_BETTER_AP
:
5481 case WMI_ROAM_REASON_LOW_RSSI
:
5482 case WMI_ROAM_REASON_SUITABLE_AP_FOUND
:
5483 case WMI_ROAM_REASON_HO_FAILED
:
5484 ath11k_warn(ab
, "ignoring not implemented roam event reason %d on vdev %i\n",
5485 roam_ev
.reason
, roam_ev
.vdev_id
);
5492 static void ath11k_chan_info_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5494 struct wmi_chan_info_event ch_info_ev
= {0};
5496 struct survey_info
*survey
;
5498 /* HW channel counters frequency value in hertz */
5499 u32 cc_freq_hz
= ab
->cc_freq_hz
;
5501 if (ath11k_pull_chan_info_ev(ab
, skb
->data
, skb
->len
, &ch_info_ev
) != 0) {
5502 ath11k_warn(ab
, "failed to extract chan info event");
5506 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
5507 "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
5508 ch_info_ev
.vdev_id
, ch_info_ev
.err_code
, ch_info_ev
.freq
,
5509 ch_info_ev
.cmd_flags
, ch_info_ev
.noise_floor
,
5510 ch_info_ev
.rx_clear_count
, ch_info_ev
.cycle_count
,
5511 ch_info_ev
.mac_clk_mhz
);
5513 if (ch_info_ev
.cmd_flags
== WMI_CHAN_INFO_END_RESP
) {
5514 ath11k_dbg(ab
, ATH11K_DBG_WMI
, "chan info report completed\n");
5519 ar
= ath11k_mac_get_ar_by_vdev_id(ab
, ch_info_ev
.vdev_id
);
5521 ath11k_warn(ab
, "invalid vdev id in chan info ev %d",
5522 ch_info_ev
.vdev_id
);
5526 spin_lock_bh(&ar
->data_lock
);
5528 switch (ar
->scan
.state
) {
5529 case ATH11K_SCAN_IDLE
:
5530 case ATH11K_SCAN_STARTING
:
5531 ath11k_warn(ab
, "received chan info event without a scan request, ignoring\n");
5533 case ATH11K_SCAN_RUNNING
:
5534 case ATH11K_SCAN_ABORTING
:
5538 idx
= freq_to_idx(ar
, ch_info_ev
.freq
);
5539 if (idx
>= ARRAY_SIZE(ar
->survey
)) {
5540 ath11k_warn(ab
, "chan info: invalid frequency %d (idx %d out of bounds)\n",
5541 ch_info_ev
.freq
, idx
);
5545 /* If FW provides MAC clock frequency in Mhz, overriding the initialized
5546 * HW channel counters frequency value
5548 if (ch_info_ev
.mac_clk_mhz
)
5549 cc_freq_hz
= (ch_info_ev
.mac_clk_mhz
* 1000);
5551 if (ch_info_ev
.cmd_flags
== WMI_CHAN_INFO_START_RESP
) {
5552 survey
= &ar
->survey
[idx
];
5553 memset(survey
, 0, sizeof(*survey
));
5554 survey
->noise
= ch_info_ev
.noise_floor
;
5555 survey
->filled
= SURVEY_INFO_NOISE_DBM
| SURVEY_INFO_TIME
|
5556 SURVEY_INFO_TIME_BUSY
;
5557 survey
->time
= div_u64(ch_info_ev
.cycle_count
, cc_freq_hz
);
5558 survey
->time_busy
= div_u64(ch_info_ev
.rx_clear_count
, cc_freq_hz
);
5561 spin_unlock_bh(&ar
->data_lock
);
5566 ath11k_pdev_bss_chan_info_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5568 struct wmi_pdev_bss_chan_info_event bss_ch_info_ev
= {};
5569 struct survey_info
*survey
;
5571 u32 cc_freq_hz
= ab
->cc_freq_hz
;
5572 u64 busy
, total
, tx
, rx
, rx_bss
;
5575 if (ath11k_pull_pdev_bss_chan_info_ev(ab
, skb
, &bss_ch_info_ev
) != 0) {
5576 ath11k_warn(ab
, "failed to extract pdev bss chan info event");
5580 busy
= (u64
)(bss_ch_info_ev
.rx_clear_count_high
) << 32 |
5581 bss_ch_info_ev
.rx_clear_count_low
;
5583 total
= (u64
)(bss_ch_info_ev
.cycle_count_high
) << 32 |
5584 bss_ch_info_ev
.cycle_count_low
;
5586 tx
= (u64
)(bss_ch_info_ev
.tx_cycle_count_high
) << 32 |
5587 bss_ch_info_ev
.tx_cycle_count_low
;
5589 rx
= (u64
)(bss_ch_info_ev
.rx_cycle_count_high
) << 32 |
5590 bss_ch_info_ev
.rx_cycle_count_low
;
5592 rx_bss
= (u64
)(bss_ch_info_ev
.rx_bss_cycle_count_high
) << 32 |
5593 bss_ch_info_ev
.rx_bss_cycle_count_low
;
5595 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
5596 "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
5597 bss_ch_info_ev
.pdev_id
, bss_ch_info_ev
.freq
,
5598 bss_ch_info_ev
.noise_floor
, busy
, total
,
5602 ar
= ath11k_mac_get_ar_by_pdev_id(ab
, bss_ch_info_ev
.pdev_id
);
5605 ath11k_warn(ab
, "invalid pdev id %d in bss_chan_info event\n",
5606 bss_ch_info_ev
.pdev_id
);
5611 spin_lock_bh(&ar
->data_lock
);
5612 idx
= freq_to_idx(ar
, bss_ch_info_ev
.freq
);
5613 if (idx
>= ARRAY_SIZE(ar
->survey
)) {
5614 ath11k_warn(ab
, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
5615 bss_ch_info_ev
.freq
, idx
);
5619 survey
= &ar
->survey
[idx
];
5621 survey
->noise
= bss_ch_info_ev
.noise_floor
;
5622 survey
->time
= div_u64(total
, cc_freq_hz
);
5623 survey
->time_busy
= div_u64(busy
, cc_freq_hz
);
5624 survey
->time_rx
= div_u64(rx_bss
, cc_freq_hz
);
5625 survey
->time_tx
= div_u64(tx
, cc_freq_hz
);
5626 survey
->filled
|= (SURVEY_INFO_NOISE_DBM
|
5628 SURVEY_INFO_TIME_BUSY
|
5629 SURVEY_INFO_TIME_RX
|
5630 SURVEY_INFO_TIME_TX
);
5632 spin_unlock_bh(&ar
->data_lock
);
5633 complete(&ar
->bss_survey_done
);
5638 static void ath11k_vdev_install_key_compl_event(struct ath11k_base
*ab
,
5639 struct sk_buff
*skb
)
5641 struct wmi_vdev_install_key_complete_arg install_key_compl
= {0};
5644 if (ath11k_pull_vdev_install_key_compl_ev(ab
, skb
, &install_key_compl
) != 0) {
5645 ath11k_warn(ab
, "failed to extract install key compl event");
5649 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
5650 "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
5651 install_key_compl
.key_idx
, install_key_compl
.key_flags
,
5652 install_key_compl
.macaddr
, install_key_compl
.status
);
5655 ar
= ath11k_mac_get_ar_by_vdev_id(ab
, install_key_compl
.vdev_id
);
5657 ath11k_warn(ab
, "invalid vdev id in install key compl ev %d",
5658 install_key_compl
.vdev_id
);
5663 ar
->install_key_status
= 0;
5665 if (install_key_compl
.status
!= WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS
) {
5666 ath11k_warn(ab
, "install key failed for %pM status %d\n",
5667 install_key_compl
.macaddr
, install_key_compl
.status
);
5668 ar
->install_key_status
= install_key_compl
.status
;
5671 complete(&ar
->install_key_done
);
5675 static void ath11k_service_available_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5678 const struct wmi_service_available_event
*ev
;
5682 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
5685 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
5689 ev
= tb
[WMI_TAG_SERVICE_AVAILABLE_EVENT
];
5691 ath11k_warn(ab
, "failed to fetch svc available ev");
5696 /* TODO: Use wmi_service_segment_offset information to get the service
5697 * especially when more services are advertised in multiple sevice
5700 for (i
= 0, j
= WMI_MAX_SERVICE
;
5701 i
< WMI_SERVICE_SEGMENT_BM_SIZE32
&& j
< WMI_MAX_EXT_SERVICE
;
5704 if (ev
->wmi_service_segment_bitmap
[i
] &
5705 BIT(j
% WMI_AVAIL_SERVICE_BITS_IN_SIZE32
))
5706 set_bit(j
, ab
->wmi_ab
.svc_map
);
5707 } while (++j
% WMI_AVAIL_SERVICE_BITS_IN_SIZE32
);
5710 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
5711 "wmi_ext_service_bitmap 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x",
5712 ev
->wmi_service_segment_bitmap
[0], ev
->wmi_service_segment_bitmap
[1],
5713 ev
->wmi_service_segment_bitmap
[2], ev
->wmi_service_segment_bitmap
[3]);
5718 static void ath11k_peer_assoc_conf_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5720 struct wmi_peer_assoc_conf_arg peer_assoc_conf
= {0};
5723 if (ath11k_pull_peer_assoc_conf_ev(ab
, skb
, &peer_assoc_conf
) != 0) {
5724 ath11k_warn(ab
, "failed to extract peer assoc conf event");
5728 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
5729 "peer assoc conf ev vdev id %d macaddr %pM\n",
5730 peer_assoc_conf
.vdev_id
, peer_assoc_conf
.macaddr
);
5733 ar
= ath11k_mac_get_ar_by_vdev_id(ab
, peer_assoc_conf
.vdev_id
);
5736 ath11k_warn(ab
, "invalid vdev id in peer assoc conf ev %d",
5737 peer_assoc_conf
.vdev_id
);
5742 complete(&ar
->peer_assoc_done
);
/* Handle WMI_UPDATE_STATS_EVENTID by forwarding the raw event skb to the
 * firmware-stats debug parser.
 */
static void ath11k_update_stats_event(struct ath11k_base *ab,
				      struct sk_buff *skb)
{
	ath11k_debug_fw_stats_process(ab, skb);
}
5751 /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
5752 * is not part of BDF CTL(Conformance test limits) table entries.
5754 static void ath11k_pdev_ctl_failsafe_check_event(struct ath11k_base
*ab
,
5755 struct sk_buff
*skb
)
5758 const struct wmi_pdev_ctl_failsafe_chk_event
*ev
;
5761 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
5764 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
5768 ev
= tb
[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT
];
5770 ath11k_warn(ab
, "failed to fetch pdev ctl failsafe check ev");
5775 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
5776 "pdev ctl failsafe check ev status %d\n",
5777 ev
->ctl_failsafe_status
);
5779 /* If ctl_failsafe_status is set to 1 FW will max out the Transmit power
5780 * to 10 dBm else the CTL power entry in the BDF would be picked up.
5782 if (ev
->ctl_failsafe_status
!= 0)
5783 ath11k_warn(ab
, "pdev ctl failsafe failure status %d",
5784 ev
->ctl_failsafe_status
);
5790 ath11k_wmi_process_csa_switch_count_event(struct ath11k_base
*ab
,
5791 const struct wmi_pdev_csa_switch_ev
*ev
,
5792 const u32
*vdev_ids
)
5795 struct ath11k_vif
*arvif
;
5797 /* Finish CSA once the switch count becomes NULL */
5798 if (ev
->current_switch_count
)
5802 for (i
= 0; i
< ev
->num_vdevs
; i
++) {
5803 arvif
= ath11k_mac_get_arvif_by_vdev_id(ab
, vdev_ids
[i
]);
5806 ath11k_warn(ab
, "Recvd csa status for unknown vdev %d",
5811 if (arvif
->is_up
&& arvif
->vif
->csa_active
)
5812 ieee80211_csa_finish(arvif
->vif
);
5818 ath11k_wmi_pdev_csa_switch_count_status_event(struct ath11k_base
*ab
,
5819 struct sk_buff
*skb
)
5822 const struct wmi_pdev_csa_switch_ev
*ev
;
5823 const u32
*vdev_ids
;
5826 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
5829 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
5833 ev
= tb
[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT
];
5834 vdev_ids
= tb
[WMI_TAG_ARRAY_UINT32
];
5836 if (!ev
|| !vdev_ids
) {
5837 ath11k_warn(ab
, "failed to fetch pdev csa switch count ev");
5842 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
5843 "pdev csa switch count %d for pdev %d, num_vdevs %d",
5844 ev
->current_switch_count
, ev
->pdev_id
,
5847 ath11k_wmi_process_csa_switch_count_event(ab
, ev
, vdev_ids
);
5853 ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5856 const struct wmi_pdev_radar_ev
*ev
;
5860 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
5863 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
5867 ev
= tb
[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT
];
5870 ath11k_warn(ab
, "failed to fetch pdev dfs radar detected ev");
5875 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
5876 "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
5877 ev
->pdev_id
, ev
->detection_mode
, ev
->chan_freq
, ev
->chan_width
,
5878 ev
->detector_id
, ev
->segment_id
, ev
->timestamp
, ev
->is_chirp
,
5879 ev
->freq_offset
, ev
->sidx
);
5881 ar
= ath11k_mac_get_ar_by_pdev_id(ab
, ev
->pdev_id
);
5884 ath11k_warn(ab
, "radar detected in invalid pdev %d\n",
5889 ath11k_dbg(ar
->ab
, ATH11K_DBG_REG
, "DFS Radar Detected in pdev %d\n",
5892 if (ar
->dfs_block_radar_events
)
5893 ath11k_info(ab
, "DFS Radar detected, but ignored as requested\n");
5895 ieee80211_radar_detected(ar
->hw
);
5902 ath11k_wmi_pdev_temperature_event(struct ath11k_base
*ab
,
5903 struct sk_buff
*skb
)
5906 struct wmi_pdev_temperature_event ev
= {0};
5908 if (ath11k_pull_pdev_temp_ev(ab
, skb
->data
, skb
->len
, &ev
) != 0) {
5909 ath11k_warn(ab
, "failed to extract pdev temperature event");
5913 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
5914 "pdev temperature ev temp %d pdev_id %d\n", ev
.temp
, ev
.pdev_id
);
5916 ar
= ath11k_mac_get_ar_by_pdev_id(ab
, ev
.pdev_id
);
5918 ath11k_warn(ab
, "invalid pdev id in pdev temperature ev %d", ev
.pdev_id
);
5922 ath11k_thermal_event_temperature(ar
, ev
.temp
);
5925 static void ath11k_wmi_tlv_op_rx(struct ath11k_base
*ab
, struct sk_buff
*skb
)
5927 struct wmi_cmd_hdr
*cmd_hdr
;
5928 enum wmi_tlv_event_id id
;
5930 cmd_hdr
= (struct wmi_cmd_hdr
*)skb
->data
;
5931 id
= FIELD_GET(WMI_CMD_HDR_CMD_ID
, (cmd_hdr
->cmd_id
));
5933 if (skb_pull(skb
, sizeof(struct wmi_cmd_hdr
)) == NULL
)
5937 /* Process all the WMI events here */
5938 case WMI_SERVICE_READY_EVENTID
:
5939 ath11k_service_ready_event(ab
, skb
);
5941 case WMI_SERVICE_READY_EXT_EVENTID
:
5942 ath11k_service_ready_ext_event(ab
, skb
);
5944 case WMI_REG_CHAN_LIST_CC_EVENTID
:
5945 ath11k_reg_chan_list_event(ab
, skb
);
5947 case WMI_READY_EVENTID
:
5948 ath11k_ready_event(ab
, skb
);
5950 case WMI_PEER_DELETE_RESP_EVENTID
:
5951 ath11k_peer_delete_resp_event(ab
, skb
);
5953 case WMI_VDEV_START_RESP_EVENTID
:
5954 ath11k_vdev_start_resp_event(ab
, skb
);
5956 case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID
:
5957 ath11k_bcn_tx_status_event(ab
, skb
);
5959 case WMI_VDEV_STOPPED_EVENTID
:
5960 ath11k_vdev_stopped_event(ab
, skb
);
5962 case WMI_MGMT_RX_EVENTID
:
5963 ath11k_mgmt_rx_event(ab
, skb
);
5964 /* mgmt_rx_event() owns the skb now! */
5966 case WMI_MGMT_TX_COMPLETION_EVENTID
:
5967 ath11k_mgmt_tx_compl_event(ab
, skb
);
5969 case WMI_SCAN_EVENTID
:
5970 ath11k_scan_event(ab
, skb
);
5972 case WMI_PEER_STA_KICKOUT_EVENTID
:
5973 ath11k_peer_sta_kickout_event(ab
, skb
);
5975 case WMI_ROAM_EVENTID
:
5976 ath11k_roam_event(ab
, skb
);
5978 case WMI_CHAN_INFO_EVENTID
:
5979 ath11k_chan_info_event(ab
, skb
);
5981 case WMI_PDEV_BSS_CHAN_INFO_EVENTID
:
5982 ath11k_pdev_bss_chan_info_event(ab
, skb
);
5984 case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID
:
5985 ath11k_vdev_install_key_compl_event(ab
, skb
);
5987 case WMI_SERVICE_AVAILABLE_EVENTID
:
5988 ath11k_service_available_event(ab
, skb
);
5990 case WMI_PEER_ASSOC_CONF_EVENTID
:
5991 ath11k_peer_assoc_conf_event(ab
, skb
);
5993 case WMI_UPDATE_STATS_EVENTID
:
5994 ath11k_update_stats_event(ab
, skb
);
5996 case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID
:
5997 ath11k_pdev_ctl_failsafe_check_event(ab
, skb
);
5999 case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID
:
6000 ath11k_wmi_pdev_csa_switch_count_status_event(ab
, skb
);
6002 case WMI_PDEV_TEMPERATURE_EVENTID
:
6003 ath11k_wmi_pdev_temperature_event(ab
, skb
);
6005 /* add Unsupported events here */
6006 case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID
:
6007 case WMI_VDEV_DELETE_RESP_EVENTID
:
6008 case WMI_PEER_OPER_MODE_CHANGE_EVENTID
:
6009 case WMI_TWT_ENABLE_EVENTID
:
6010 case WMI_TWT_DISABLE_EVENTID
:
6011 ath11k_dbg(ab
, ATH11K_DBG_WMI
,
6012 "ignoring unsupported event 0x%x\n", id
);
6014 case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID
:
6015 ath11k_wmi_pdev_dfs_radar_detected_event(ab
, skb
);
6017 /* TODO: Add remaining events */
6019 ath11k_warn(ab
, "Unknown eventid: 0x%x\n", id
);
6027 static int ath11k_connect_pdev_htc_service(struct ath11k_base
*ab
,
6031 u32 svc_id
[] = { ATH11K_HTC_SVC_ID_WMI_CONTROL
,
6032 ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1
,
6033 ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2
};
6035 struct ath11k_htc_svc_conn_req conn_req
;
6036 struct ath11k_htc_svc_conn_resp conn_resp
;
6038 memset(&conn_req
, 0, sizeof(conn_req
));
6039 memset(&conn_resp
, 0, sizeof(conn_resp
));
6041 /* these fields are the same for all service endpoints */
6042 conn_req
.ep_ops
.ep_tx_complete
= ath11k_wmi_htc_tx_complete
;
6043 conn_req
.ep_ops
.ep_rx_complete
= ath11k_wmi_tlv_op_rx
;
6044 conn_req
.ep_ops
.ep_tx_credits
= ath11k_wmi_op_ep_tx_credits
;
6046 /* connect to control service */
6047 conn_req
.service_id
= svc_id
[pdev_idx
];
6049 status
= ath11k_htc_connect_service(&ab
->htc
, &conn_req
, &conn_resp
);
6051 ath11k_warn(ab
, "failed to connect to WMI CONTROL service status: %d\n",
6056 ab
->wmi_ab
.wmi_endpoint_id
[pdev_idx
] = conn_resp
.eid
;
6057 ab
->wmi_ab
.wmi
[pdev_idx
].eid
= conn_resp
.eid
;
6058 ab
->wmi_ab
.max_msg_len
[pdev_idx
] = conn_resp
.max_msg_len
;
6064 ath11k_wmi_send_unit_test_cmd(struct ath11k
*ar
,
6065 struct wmi_unit_test_cmd ut_cmd
,
6068 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
6069 struct wmi_unit_test_cmd
*cmd
;
6070 struct sk_buff
*skb
;
6071 struct wmi_tlv
*tlv
;
6074 int buf_len
, arg_len
;
6078 arg_len
= sizeof(u32
) * ut_cmd
.num_args
;
6079 buf_len
= sizeof(ut_cmd
) + arg_len
+ TLV_HDR_SIZE
;
6081 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, buf_len
);
6085 cmd
= (struct wmi_unit_test_cmd
*)skb
->data
;
6086 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_UNIT_TEST_CMD
) |
6087 FIELD_PREP(WMI_TLV_LEN
, sizeof(ut_cmd
) - TLV_HDR_SIZE
);
6089 cmd
->vdev_id
= ut_cmd
.vdev_id
;
6090 cmd
->module_id
= ut_cmd
.module_id
;
6091 cmd
->num_args
= ut_cmd
.num_args
;
6092 cmd
->diag_token
= ut_cmd
.diag_token
;
6094 ptr
= skb
->data
+ sizeof(ut_cmd
);
6097 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_UINT32
) |
6098 FIELD_PREP(WMI_TLV_LEN
, arg_len
);
6100 ptr
+= TLV_HDR_SIZE
;
6103 for (i
= 0; i
< ut_cmd
.num_args
; i
++)
6104 ut_cmd_args
[i
] = test_args
[i
];
6106 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_UNIT_TEST_CMDID
);
6109 ath11k_warn(ar
->ab
, "failed to send WMI_UNIT_TEST CMD :%d\n",
6114 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
6115 "WMI unit test : module %d vdev %d n_args %d token %d\n",
6116 cmd
->module_id
, cmd
->vdev_id
, cmd
->num_args
,
6122 int ath11k_wmi_simulate_radar(struct ath11k
*ar
)
6124 struct ath11k_vif
*arvif
;
6125 u32 dfs_args
[DFS_MAX_TEST_ARGS
];
6126 struct wmi_unit_test_cmd wmi_ut
;
6127 bool arvif_found
= false;
6129 list_for_each_entry(arvif
, &ar
->arvifs
, list
) {
6130 if (arvif
->is_started
&& arvif
->vdev_type
== WMI_VDEV_TYPE_AP
) {
6139 dfs_args
[DFS_TEST_CMDID
] = 0;
6140 dfs_args
[DFS_TEST_PDEV_ID
] = ar
->pdev
->pdev_id
;
6141 /* Currently we could pass segment_id(b0 - b1), chirp(b2)
6142 * freq offset (b3 - b10) to unit test. For simulation
6143 * purpose this can be set to 0 which is valid.
6145 dfs_args
[DFS_TEST_RADAR_PARAM
] = 0;
6147 wmi_ut
.vdev_id
= arvif
->vdev_id
;
6148 wmi_ut
.module_id
= DFS_UNIT_TEST_MODULE
;
6149 wmi_ut
.num_args
= DFS_MAX_TEST_ARGS
;
6150 wmi_ut
.diag_token
= DFS_UNIT_TEST_TOKEN
;
6152 ath11k_dbg(ar
->ab
, ATH11K_DBG_REG
, "Triggering Radar Simulation\n");
6154 return ath11k_wmi_send_unit_test_cmd(ar
, wmi_ut
, dfs_args
);
6157 int ath11k_wmi_connect(struct ath11k_base
*ab
)
6162 wmi_ep_count
= ab
->htc
.wmi_ep_count
;
6163 if (wmi_ep_count
> MAX_RADIOS
)
6166 for (i
= 0; i
< wmi_ep_count
; i
++)
6167 ath11k_connect_pdev_htc_service(ab
, i
);
6172 static void ath11k_wmi_pdev_detach(struct ath11k_base
*ab
, u8 pdev_id
)
6174 if (WARN_ON(pdev_id
>= MAX_RADIOS
))
6177 /* TODO: Deinit any pdev specific wmi resource */
6180 int ath11k_wmi_pdev_attach(struct ath11k_base
*ab
,
6183 struct ath11k_pdev_wmi
*wmi_handle
;
6185 if (pdev_id
>= MAX_RADIOS
)
6188 wmi_handle
= &ab
->wmi_ab
.wmi
[pdev_id
];
6190 wmi_handle
->wmi_ab
= &ab
->wmi_ab
;
6193 /* TODO: Init remaining resource specific to pdev */
6198 int ath11k_wmi_attach(struct ath11k_base
*ab
)
6202 ret
= ath11k_wmi_pdev_attach(ab
, 0);
6207 ab
->wmi_ab
.preferred_hw_mode
= WMI_HOST_HW_MODE_MAX
;
6209 /* TODO: Init remaining wmi soc resources required */
6210 init_completion(&ab
->wmi_ab
.service_ready
);
6211 init_completion(&ab
->wmi_ab
.unified_ready
);
6216 void ath11k_wmi_detach(struct ath11k_base
*ab
)
6220 /* TODO: Deinit wmi resource specific to SOC as required */
6222 for (i
= 0; i
< ab
->htc
.wmi_ep_count
; i
++)
6223 ath11k_wmi_pdev_detach(ab
, i
);