1 /*
2 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for
6 * any purpose with or without fee is hereby granted, provided that the
7 * above copyright notice and this permission notice appear in all
8 * copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /**
21 * DOC: wma_mgmt.c
22 *
23 * This file contains STA/SAP and protocol related functions.
24 */
25
26 /* Header files */
27
28 #include "wma.h"
29 #include "wma_api.h"
30 #include "cds_api.h"
31 #include "wmi_unified_api.h"
32 #include "wlan_qct_sys.h"
33 #include "wni_api.h"
34 #include "ani_global.h"
35 #include "wmi_unified.h"
36 #include "wni_cfg.h"
37
38 #include "qdf_nbuf.h"
39 #include "qdf_types.h"
40 #include "qdf_mem.h"
41
42 #include "wma_types.h"
43 #include "lim_api.h"
44 #include "lim_session_utils.h"
45
46 #include "cds_utils.h"
47 #include "wlan_dlm_api.h"
48 #if defined(CONNECTIVITY_PKTLOG) || !defined(REMOVE_PKT_LOG)
49 #include "pktlog_ac.h"
50 #else
51 #include "pktlog_ac_fmt.h"
52 #endif /* REMOVE_PKT_LOG */
53
54 #include "dbglog_host.h"
55 #include "csr_api.h"
56 #include "ol_fw.h"
57 #include "wma_internal.h"
58 #include "wlan_policy_mgr_api.h"
59 #include "cdp_txrx_flow_ctrl_legacy.h"
60 #include <cdp_txrx_peer_ops.h>
61 #include <cdp_txrx_pmf.h>
62 #include <cdp_txrx_cfg.h>
63 #include <cdp_txrx_cmn.h>
64 #include <cdp_txrx_misc.h>
66 #include "wlan_mgmt_txrx_tgt_api.h"
67 #include "wlan_objmgr_psoc_obj.h"
68 #include "wlan_objmgr_pdev_obj.h"
69 #include "wlan_objmgr_vdev_obj.h"
70 #include "wlan_lmac_if_api.h"
71 #include <cdp_txrx_handle.h>
72 #include "wma_he.h"
73 #include "wma_eht.h"
74 #include <qdf_crypto.h>
75 #include "wma_twt.h"
76 #include "wlan_p2p_cfg_api.h"
77 #include "cfg_ucfg_api.h"
78 #include "cfg_mlme_sta.h"
79 #include "wlan_mlme_api.h"
80 #include "wmi_unified_bcn_api.h"
81 #include <wlan_crypto_global_api.h>
82 #include <wlan_mlme_main.h>
83 #include <../../core/src/vdev_mgr_ops.h>
84 #include "wlan_pkt_capture_ucfg_api.h"
85
86 #if defined(CONNECTIVITY_PKTLOG) || !defined(REMOVE_PKT_LOG)
87 #include <wlan_logging_sock_svc.h>
88 #endif
89 #include "wlan_cm_roam_api.h"
90 #include "wlan_cm_api.h"
91 #include "wlan_mlo_link_force.h"
92 #include <target_if_spatial_reuse.h>
93 #include "wlan_nan_api_i.h"
94
95 /* Max debug string size for WMM in bytes */
96 #define WMA_WMM_DEBUG_STRING_SIZE 512
97
98 /**
99 * wma_send_bcn_buf_ll() - prepare and send beacon buffer to fw for LL
100 * @wma: wma handle
101 * @vdev_id: vdev id
102 * @param_buf: SWBA parameters
103 *
104 * Return: none
105 */
106 #ifdef WLAN_WMI_BCN
107 static void wma_send_bcn_buf_ll(tp_wma_handle wma,
108 uint8_t vdev_id,
109 WMI_HOST_SWBA_EVENTID_param_tlvs *param_buf)
110 {
111 struct ieee80211_frame *wh;
112 struct beacon_info *bcn;
113 wmi_tim_info *tim_info = param_buf->tim_info;
114 uint8_t *bcn_payload;
115 QDF_STATUS ret;
116 struct beacon_tim_ie *tim_ie;
117 wmi_p2p_noa_info *p2p_noa_info = param_buf->p2p_noa_info;
118 struct p2p_sub_element_noa noa_ie;
119 struct wmi_bcn_send_from_host params;
120 uint8_t i;
121
122 bcn = wma->interfaces[vdev_id].beacon;
123 if (!bcn || !bcn->buf) {
124 wma_err("Invalid beacon buffer");
125 return;
126 }
127
128 if (!param_buf->tim_info || !param_buf->p2p_noa_info) {
129 wma_err("Invalid tim info or p2p noa info");
130 return;
131 }
132
133 if (WMI_UNIFIED_NOA_ATTR_NUM_DESC_GET(p2p_noa_info) >
134 WMI_P2P_MAX_NOA_DESCRIPTORS) {
135 wma_err("Too many descriptors %d",
136 WMI_UNIFIED_NOA_ATTR_NUM_DESC_GET(p2p_noa_info));
137 return;
138 }
139
140 qdf_spin_lock_bh(&bcn->lock);
141
142 bcn_payload = qdf_nbuf_data(bcn->buf);
143
144 tim_ie = (struct beacon_tim_ie *)(&bcn_payload[bcn->tim_ie_offset]);
145
146 if (tim_info->tim_changed) {
147 if (tim_info->tim_num_ps_pending)
148 qdf_mem_copy(&tim_ie->tim_bitmap, tim_info->tim_bitmap,
149 WMA_TIM_SUPPORTED_PVB_LENGTH);
150 else
151 qdf_mem_zero(&tim_ie->tim_bitmap,
152 WMA_TIM_SUPPORTED_PVB_LENGTH);
153 /*
154 * Currently we support fixed number of
155 * peers as limited by HAL_NUM_STA.
156 * tim offset is always 0
157 */
158 tim_ie->tim_bitctl = 0;
159 }
160
161 /* Update DTIM Count */
162 if (tim_ie->dtim_count == 0)
163 tim_ie->dtim_count = tim_ie->dtim_period - 1;
164 else
165 tim_ie->dtim_count--;
166
167 /*
168 * DTIM count needs to be backedup so that
169 * when umac updates the beacon template
170 * current dtim count can be updated properly
171 */
172 bcn->dtim_count = tim_ie->dtim_count;
173
174 /* update state for buffered multicast frames on DTIM */
175 if (tim_info->tim_mcast && (tim_ie->dtim_count == 0 ||
176 tim_ie->dtim_period == 1))
177 tim_ie->tim_bitctl |= 1;
178 else
179 tim_ie->tim_bitctl &= ~1;
180
181 	/* To avoid a SW-generated frame sequence colliding with HW-generated
182 	 * frames, values lower than MIN_SW_SEQ are reserved for HW frames
183 	 */
184 if ((bcn->seq_no & IEEE80211_SEQ_MASK) < MIN_SW_SEQ)
185 bcn->seq_no = MIN_SW_SEQ;
186
187 wh = (struct ieee80211_frame *)bcn_payload;
188 *(uint16_t *) &wh->i_seq[0] = htole16(bcn->seq_no
189 << IEEE80211_SEQ_SEQ_SHIFT);
190 bcn->seq_no++;
191
192 if (WMI_UNIFIED_NOA_ATTR_IS_MODIFIED(p2p_noa_info)) {
193 qdf_mem_zero(&noa_ie, sizeof(noa_ie));
194
195 noa_ie.index =
196 (uint8_t) WMI_UNIFIED_NOA_ATTR_INDEX_GET(p2p_noa_info);
197 noa_ie.oppPS =
198 (uint8_t) WMI_UNIFIED_NOA_ATTR_OPP_PS_GET(p2p_noa_info);
199 noa_ie.ctwindow =
200 (uint8_t) WMI_UNIFIED_NOA_ATTR_CTWIN_GET(p2p_noa_info);
201 noa_ie.num_descriptors = (uint8_t)
202 WMI_UNIFIED_NOA_ATTR_NUM_DESC_GET(p2p_noa_info);
203 wma_debug("index %u, oppPs %u, ctwindow %u, num_descriptors = %u",
204 noa_ie.index,
205 noa_ie.oppPS, noa_ie.ctwindow, noa_ie.num_descriptors);
206 for (i = 0; i < noa_ie.num_descriptors; i++) {
207 noa_ie.noa_descriptors[i].type_count =
208 (uint8_t) p2p_noa_info->noa_descriptors[i].
209 type_count;
210 noa_ie.noa_descriptors[i].duration =
211 p2p_noa_info->noa_descriptors[i].duration;
212 noa_ie.noa_descriptors[i].interval =
213 p2p_noa_info->noa_descriptors[i].interval;
214 noa_ie.noa_descriptors[i].start_time =
215 p2p_noa_info->noa_descriptors[i].start_time;
216 wma_debug("NoA descriptor[%d] type_count %u, duration %u, interval %u, start_time = %u",
217 i,
218 noa_ie.noa_descriptors[i].type_count,
219 noa_ie.noa_descriptors[i].duration,
220 noa_ie.noa_descriptors[i].interval,
221 noa_ie.noa_descriptors[i].start_time);
222 }
223 wma_update_noa(bcn, &noa_ie);
224
225 /* Send a msg to LIM to update the NoA IE in probe response
226 * frames transmitted by the host
227 */
228 wma_update_probe_resp_noa(wma, &noa_ie);
229 }
230
231 if (bcn->dma_mapped) {
232 qdf_nbuf_unmap_single(wma->qdf_dev, bcn->buf, QDF_DMA_TO_DEVICE);
233 bcn->dma_mapped = 0;
234 }
235 ret = qdf_nbuf_map_single(wma->qdf_dev, bcn->buf, QDF_DMA_TO_DEVICE);
236 if (ret != QDF_STATUS_SUCCESS) {
237 wma_err("failed map beacon buf to DMA region");
238 qdf_spin_unlock_bh(&bcn->lock);
239 return;
240 }
241
242 bcn->dma_mapped = 1;
243 params.vdev_id = vdev_id;
244 params.data_len = bcn->len;
245 params.frame_ctrl = *((A_UINT16 *) wh->i_fc);
246 params.frag_ptr = qdf_nbuf_get_frag_paddr(bcn->buf, 0);
247 params.dtim_flag = 0;
248 	/* notify Firmware of DTIM and mcast/bcast traffic */
249 if (tim_ie->dtim_count == 0) {
250 params.dtim_flag |= WMI_BCN_SEND_DTIM_ZERO;
251 /* deliver mcast/bcast traffic in next DTIM beacon */
252 if (tim_ie->tim_bitctl & 0x01)
253 params.dtim_flag |= WMI_BCN_SEND_DTIM_BITCTL_SET;
254 }
255
256 wmi_unified_bcn_buf_ll_cmd(wma->wmi_handle,
257 				   &params);
258
259 qdf_spin_unlock_bh(&bcn->lock);
260 }
261 #else
262 static inline void
263 wma_send_bcn_buf_ll(tp_wma_handle wma,
264 uint8_t vdev_id,
265 WMI_HOST_SWBA_EVENTID_param_tlvs *param_buf)
266 {
267 }
268 #endif
269 /**
270 * wma_beacon_swba_handler() - swba event handler
271 * @handle: wma handle
272 * @event: event data
273 * @len: data length
274 *
275  * The SWBA event is an alert from firmware requesting the host to queue
276  * a beacon for transmission; it is used only in host beacon mode.
277 *
278 * Return: 0 for success or error code
279 */
280 #ifdef WLAN_WMI_BCN
281 int wma_beacon_swba_handler(void *handle, uint8_t *event, uint32_t len)
282 {
283 tp_wma_handle wma = (tp_wma_handle) handle;
284 WMI_HOST_SWBA_EVENTID_param_tlvs *param_buf;
285 wmi_host_swba_event_fixed_param *swba_event;
286 uint32_t vdev_map;
287 uint8_t vdev_id = 0;
288 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
289
290 param_buf = (WMI_HOST_SWBA_EVENTID_param_tlvs *) event;
291 if (!param_buf) {
292 wma_err("Invalid swba event buffer");
293 return -EINVAL;
294 }
295 swba_event = param_buf->fixed_param;
296 vdev_map = swba_event->vdev_map;
297
298 wma_debug("vdev_map = %d", vdev_map);
299 for (; vdev_map && vdev_id < wma->max_bssid;
300 vdev_id++, vdev_map >>= 1) {
301 if (!(vdev_map & 0x1))
302 continue;
303 if (!cdp_cfg_is_high_latency(soc,
304 (struct cdp_cfg *)cds_get_context(QDF_MODULE_ID_CFG)))
305 wma_send_bcn_buf_ll(wma, vdev_id, param_buf);
306 break;
307 }
308 return 0;
309 }
310 #else
311 static inline int
312 wma_beacon_swba_handler(void *handle, uint8_t *event, uint32_t len)
313 {
314 return 0;
315 }
316 #endif
317
318 #ifdef FEATURE_WLAN_DIAG_SUPPORT
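/**
 * wma_sta_kickout_event() - report STA kickout diag event
 * @kickout_reason: kickout reason code
 * @vdev_id: vdev id
 * @macaddr: peer mac address
 *
 * Fills a host_event_wlan_kickout record and reports it through the
 * host diag event framework.
 *
 * Return: none
 */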
319 void wma_sta_kickout_event(uint32_t kickout_reason, uint8_t vdev_id,
320 uint8_t *macaddr)
321 {
322 WLAN_HOST_DIAG_EVENT_DEF(sta_kickout, struct host_event_wlan_kickout);
323 qdf_mem_zero(&sta_kickout, sizeof(sta_kickout));
324 sta_kickout.reasoncode = kickout_reason;
325 sta_kickout.vdev_id = vdev_id;
326 if (macaddr)
327 qdf_mem_copy(sta_kickout.peer_mac, macaddr,
328 QDF_MAC_ADDR_SIZE);
329 WLAN_HOST_DIAG_EVENT_REPORT(&sta_kickout, EVENT_WLAN_STA_KICKOUT);
330 }
331 #endif
332
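/**
 * wma_peer_sta_kickout_event_handler() - peer STA kickout event handler
 * @handle: wma handle
 * @event: event data
 * @len: data length
 *
 * Handles the STA kickout event from firmware. Depending on the kickout
 * reason, the event is either treated as a beacon miss (STA mode,
 * unspecified reason) or a delete station context indication is sent to LIM.
 *
 * Return: 0 for success, error code otherwise
 */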
333 int wma_peer_sta_kickout_event_handler(void *handle, uint8_t *event,
334 uint32_t len)
335 {
336 tp_wma_handle wma = (tp_wma_handle) handle;
337 WMI_PEER_STA_KICKOUT_EVENTID_param_tlvs *param_buf = NULL;
338 wmi_peer_sta_kickout_event_fixed_param *kickout_event = NULL;
339 uint8_t vdev_id, macaddr[QDF_MAC_ADDR_SIZE];
340 tpDeleteStaContext del_sta_ctx;
341 uint8_t *addr, *bssid;
342 struct wlan_objmgr_vdev *vdev;
343 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
344
345 param_buf = (WMI_PEER_STA_KICKOUT_EVENTID_param_tlvs *) event;
346 kickout_event = param_buf->fixed_param;
347 WMI_MAC_ADDR_TO_CHAR_ARRAY(&kickout_event->peer_macaddr, macaddr);
348 if (cdp_peer_get_vdevid(soc, macaddr, &vdev_id) !=
349 QDF_STATUS_SUCCESS) {
350 wma_err("Not able to find BSSID for peer ["QDF_MAC_ADDR_FMT"]",
351 QDF_MAC_ADDR_REF(macaddr));
352 return -EINVAL;
353 }
354
355 if (!wma_is_vdev_valid(vdev_id))
356 return -EINVAL;
357
358 vdev = wma->interfaces[vdev_id].vdev;
359 if (!vdev) {
360 wma_err("Not able to find vdev for VDEV_%d", vdev_id);
361 return -EINVAL;
362 }
363 addr = wlan_vdev_mlme_get_macaddr(vdev);
364
365 wma_nofl_info("STA kickout for "QDF_MAC_ADDR_FMT", on mac "QDF_MAC_ADDR_FMT", vdev %d, reason:%d",
366 QDF_MAC_ADDR_REF(macaddr), QDF_MAC_ADDR_REF(addr),
367 vdev_id, kickout_event->reason);
368
369 if (wma_is_roam_in_progress(vdev_id)) {
370 wma_err("vdev_id %d: Ignore STA kick out since roaming is in progress",
371 vdev_id);
372 return -EINVAL;
373 }
374 bssid = wma_get_vdev_bssid(vdev);
375 if (!bssid) {
376 wma_err("Failed to get bssid for vdev_%d", vdev_id);
377 return -ENOMEM;
378 }
379
380 switch (kickout_event->reason) {
381 case WMI_PEER_STA_KICKOUT_REASON_IBSS_DISCONNECT:
382 goto exit_handler;
383 #ifdef FEATURE_WLAN_TDLS
384 case WMI_PEER_STA_KICKOUT_REASON_TDLS_DISCONNECT:
385 del_sta_ctx = (tpDeleteStaContext)
386 qdf_mem_malloc(sizeof(tDeleteStaContext));
387 if (!del_sta_ctx) {
388 wma_err("mem alloc failed for struct del_sta_context for TDLS peer: "QDF_MAC_ADDR_FMT,
389 QDF_MAC_ADDR_REF(macaddr));
390 return -ENOMEM;
391 }
392
393 del_sta_ctx->is_tdls = true;
394 del_sta_ctx->vdev_id = vdev_id;
395 qdf_mem_copy(del_sta_ctx->addr2, macaddr, QDF_MAC_ADDR_SIZE);
396 qdf_mem_copy(del_sta_ctx->bssId, bssid,
397 QDF_MAC_ADDR_SIZE);
398 del_sta_ctx->reasonCode = HAL_DEL_STA_REASON_CODE_KEEP_ALIVE;
399 wma_send_msg(wma, SIR_LIM_DELETE_STA_CONTEXT_IND,
400 (void *)del_sta_ctx, 0);
401 goto exit_handler;
402 #endif /* FEATURE_WLAN_TDLS */
403
404 case WMI_PEER_STA_KICKOUT_REASON_UNSPECIFIED:
405 /*
406 * Default legacy value used by original firmware implementation
407 */
408 if (wma->interfaces[vdev_id].type == WMI_VDEV_TYPE_STA &&
409 (wma->interfaces[vdev_id].sub_type == 0 ||
410 wma->interfaces[vdev_id].sub_type ==
411 WMI_UNIFIED_VDEV_SUBTYPE_P2P_CLIENT) &&
412 !qdf_mem_cmp(bssid,
413 macaddr, QDF_MAC_ADDR_SIZE)) {
414 wma_sta_kickout_event(
415 HOST_STA_KICKOUT_REASON_UNSPECIFIED, vdev_id, macaddr);
416 /*
417 * KICKOUT event is for current station-AP connection.
418 * Treat it like final beacon miss. Station may not have
419 * missed beacons but not able to transmit frames to AP
420 * for a long time. Must disconnect to get out of
421 * this sticky situation.
422 * In future implementation, roaming module will also
423 * handle this event and perform a scan.
424 */
425 wma_warn("WMI_PEER_STA_KICKOUT_REASON_UNSPECIFIED event for STA");
426 wma_beacon_miss_handler(wma, vdev_id,
427 kickout_event->rssi);
428 goto exit_handler;
429 }
430 break;
431
432 case WMI_PEER_STA_KICKOUT_REASON_XRETRY:
433 case WMI_PEER_STA_KICKOUT_REASON_INACTIVITY:
434 /*
435 * Handle SA query kickout is same as inactivity kickout.
436 * This could be for STA or SAP role
437 */
438 case WMI_PEER_STA_KICKOUT_REASON_SA_QUERY_TIMEOUT:
439 default:
440 break;
441 }
442
443 /*
444 * default action is to send delete station context indication to LIM
445 */
446 del_sta_ctx =
447 (tDeleteStaContext *) qdf_mem_malloc(sizeof(tDeleteStaContext));
448 if (!del_sta_ctx) {
449 wma_err("QDF MEM Alloc Failed for struct del_sta_context");
450 return -ENOMEM;
451 }
452
453 del_sta_ctx->is_tdls = false;
454 del_sta_ctx->vdev_id = vdev_id;
455 qdf_mem_copy(del_sta_ctx->addr2, macaddr, QDF_MAC_ADDR_SIZE);
456 qdf_mem_copy(del_sta_ctx->bssId, addr, QDF_MAC_ADDR_SIZE);
457 if (kickout_event->reason ==
458 WMI_PEER_STA_KICKOUT_REASON_SA_QUERY_TIMEOUT)
459 del_sta_ctx->reasonCode =
460 HAL_DEL_STA_REASON_CODE_SA_QUERY_TIMEOUT;
461 else if (kickout_event->reason == WMI_PEER_STA_KICKOUT_REASON_XRETRY)
462 del_sta_ctx->reasonCode = HAL_DEL_STA_REASON_CODE_XRETRY;
463 else
464 del_sta_ctx->reasonCode = HAL_DEL_STA_REASON_CODE_KEEP_ALIVE;
465
466 if (wmi_service_enabled(wma->wmi_handle,
467 wmi_service_hw_db2dbm_support))
468 del_sta_ctx->rssi = kickout_event->rssi;
469 else
470 del_sta_ctx->rssi = kickout_event->rssi +
471 WMA_TGT_NOISE_FLOOR_DBM;
472 wma_sta_kickout_event(del_sta_ctx->reasonCode, vdev_id, macaddr);
473 wma_send_msg(wma, SIR_LIM_DELETE_STA_CONTEXT_IND, (void *)del_sta_ctx,
474 0);
475 wma_lost_link_info_handler(wma, vdev_id, del_sta_ctx->rssi);
476
477 exit_handler:
478 return 0;
479 }
480
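/**
 * wma_unified_bcntx_status_event_handler() - beacon tx status event handler
 * @handle: wma handle
 * @cmd_param_info: event data
 * @len: data length
 *
 * Handles the offloaded beacon tx status event from firmware and, for AP
 * mode vdevs, forwards a first beacon tx complete indication to upper layers.
 *
 * Return: 0 for success, error code otherwise
 */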
481 int wma_unified_bcntx_status_event_handler(void *handle,
482 uint8_t *cmd_param_info,
483 uint32_t len)
484 {
485 tp_wma_handle wma = (tp_wma_handle) handle;
486 WMI_OFFLOAD_BCN_TX_STATUS_EVENTID_param_tlvs *param_buf;
487 wmi_offload_bcn_tx_status_event_fixed_param *resp_event;
488 tSirFirstBeaconTxCompleteInd *beacon_tx_complete_ind;
489
490 param_buf =
491 (WMI_OFFLOAD_BCN_TX_STATUS_EVENTID_param_tlvs *) cmd_param_info;
492 if (!param_buf) {
493 wma_err("Invalid bcn tx response event buffer");
494 return -EINVAL;
495 }
496
497 resp_event = param_buf->fixed_param;
498
499 if (resp_event->vdev_id >= wma->max_bssid) {
500 wma_err("received invalid vdev_id %d", resp_event->vdev_id);
501 return -EINVAL;
502 }
503
504 /* Check for valid handle to ensure session is not
505 * deleted in any race
506 */
507 if (!wma->interfaces[resp_event->vdev_id].vdev) {
508 wma_err("vdev is NULL for vdev_%d", resp_event->vdev_id);
509 return -EINVAL;
510 }
511
512 /* Beacon Tx Indication supports only AP mode. Ignore in other modes */
513 if (wma_is_vdev_in_ap_mode(wma, resp_event->vdev_id) == false) {
514 wma_debug("Beacon Tx Indication does not support type %d and sub_type %d",
515 wma->interfaces[resp_event->vdev_id].type,
516 wma->interfaces[resp_event->vdev_id].sub_type);
517 return 0;
518 }
519
520 beacon_tx_complete_ind = (tSirFirstBeaconTxCompleteInd *)
521 qdf_mem_malloc(sizeof(tSirFirstBeaconTxCompleteInd));
522 if (!beacon_tx_complete_ind) {
523 wma_err("Failed to alloc beacon_tx_complete_ind");
524 return -ENOMEM;
525 }
526
527 beacon_tx_complete_ind->messageType = WMA_DFS_BEACON_TX_SUCCESS_IND;
528 beacon_tx_complete_ind->length = sizeof(tSirFirstBeaconTxCompleteInd);
529 beacon_tx_complete_ind->bss_idx = resp_event->vdev_id;
530
531 wma_send_msg(wma, WMA_DFS_BEACON_TX_SUCCESS_IND,
532 (void *)beacon_tx_complete_ind, 0);
533 return 0;
534 }
535
536 /**
537 * wma_get_go_probe_timeout() - get P2P GO probe timeout
538 * @mac: UMAC handler
539 * @max_inactive_time: return max inactive time
540 * @max_unresponsive_time: return max unresponsive time
541 *
542 * Return: none
543 */
544 #ifdef CONVERGED_P2P_ENABLE
545 static inline void
546 wma_get_go_probe_timeout(struct mac_context *mac,
547 uint32_t *max_inactive_time,
548 uint32_t *max_unresponsive_time)
549 {
550 uint32_t keep_alive;
551 QDF_STATUS status;
552
553 status = cfg_p2p_get_go_link_monitor_period(mac->psoc,
554 max_inactive_time);
555 if (QDF_IS_STATUS_ERROR(status)) {
556 		wma_err("Failed to get GO link monitor period");
557 *max_inactive_time = WMA_LINK_MONITOR_DEFAULT_TIME_SECS;
558 }
559 status = cfg_p2p_get_go_keepalive_period(mac->psoc,
560 &keep_alive);
561 if (QDF_IS_STATUS_ERROR(status)) {
562 wma_err("Failed to read go keep alive");
563 keep_alive = WMA_KEEP_ALIVE_DEFAULT_TIME_SECS;
564 }
565
566 *max_unresponsive_time = *max_inactive_time + keep_alive;
567 }
568 #else
569 static inline void
570 wma_get_go_probe_timeout(struct mac_context *mac,
571 uint32_t *max_inactive_time,
572 uint32_t *max_unresponsive_time)
573 {
574 }
575 #endif
576
577 /**
578 * wma_get_link_probe_timeout() - get link timeout based on sub type
579 * @mac: UMAC handler
580  * @sub_type: vdev sub type
581 * @max_inactive_time: return max inactive time
582 * @max_unresponsive_time: return max unresponsive time
583 *
584 * Return: none
585 */
586 static inline void
587 wma_get_link_probe_timeout(struct mac_context *mac,
588 uint32_t sub_type,
589 uint32_t *max_inactive_time,
590 uint32_t *max_unresponsive_time)
591 {
592 if (sub_type == WMI_UNIFIED_VDEV_SUBTYPE_P2P_GO) {
593 wma_get_go_probe_timeout(mac, max_inactive_time,
594 max_unresponsive_time);
595 } else {
596 *max_inactive_time =
597 mac->mlme_cfg->timeouts.ap_link_monitor_timeout;
598 *max_unresponsive_time = *max_inactive_time +
599 mac->mlme_cfg->timeouts.ap_keep_alive_timeout;
600 }
601 }
602
603 /**
604 * wma_verify_rate_code() - verify if rate code is valid.
605 * @rate_code: rate code
606 * @band: band information
607 *
608  * Return: true if the rate code is valid for the given band, else false
609 */
610 static bool wma_verify_rate_code(u_int32_t rate_code, enum cds_band_type band)
611 {
612 uint8_t preamble, nss, rate;
613 bool valid = true;
614
615 preamble = (rate_code & 0xc0) >> 6;
616 nss = (rate_code & 0x30) >> 4;
617 rate = rate_code & 0xf;
618
619 switch (preamble) {
620 case WMI_RATE_PREAMBLE_CCK:
621 if (nss != 0 || rate > 3 || band == CDS_BAND_5GHZ)
622 valid = false;
623 break;
624 case WMI_RATE_PREAMBLE_OFDM:
625 if (nss != 0 || rate > 7)
626 valid = false;
627 break;
628 case WMI_RATE_PREAMBLE_HT:
629 if (nss != 0 || rate > 7)
630 valid = false;
631 break;
632 case WMI_RATE_PREAMBLE_VHT:
633 if (nss != 0 || rate > 9)
634 valid = false;
635 break;
636 default:
637 break;
638 }
639 return valid;
640 }
641
642 #define TX_MGMT_RATE_2G_ENABLE_OFFSET 30
643 #define TX_MGMT_RATE_5G_ENABLE_OFFSET 31
644 #define TX_MGMT_RATE_2G_OFFSET 0
645 #define TX_MGMT_RATE_5G_OFFSET 12
646
647 #define MAX_VDEV_MGMT_RATE_PARAMS 2
648 /* params being sent:
649 * wmi_vdev_param_mgmt_tx_rate
650 * wmi_vdev_param_per_band_mgmt_tx_rate
651 */
652
653 /**
654  * wma_set_vdev_mgmt_rate() - set vdev mgmt rate.
655 * @wma: wma handle
656 * @vdev_id: vdev id
657 *
658 * Return: None
659 */
660 void wma_set_vdev_mgmt_rate(tp_wma_handle wma, uint8_t vdev_id)
661 {
662 uint32_t cfg_val;
663 uint32_t per_band_mgmt_tx_rate = 0;
664 enum cds_band_type band = 0;
665 struct mac_context *mac = cds_get_context(QDF_MODULE_ID_PE);
666 struct dev_set_param setparam[MAX_VDEV_MGMT_RATE_PARAMS] = {};
667 uint8_t index = 0;
668 QDF_STATUS status = QDF_STATUS_E_FAILURE;
669
670 if (!mac) {
671 wma_err("Failed to get mac");
672 return;
673 }
674
675 cfg_val = mac->mlme_cfg->sap_cfg.rate_tx_mgmt;
676 band = CDS_BAND_ALL;
677 if ((cfg_val == MLME_CFG_TX_MGMT_RATE_DEF) ||
678 !wma_verify_rate_code(cfg_val, band)) {
679 wma_nofl_debug("default WNI_CFG_RATE_FOR_TX_MGMT, ignore");
680 } else {
681 status = mlme_check_index_setparam(setparam,
682 wmi_vdev_param_mgmt_tx_rate,
683 cfg_val, index++,
684 MAX_VDEV_MGMT_RATE_PARAMS);
685 if (QDF_IS_STATUS_ERROR(status)) {
686 wma_err("failed at wmi_vdev_param_mgmt_tx_rate");
687 goto error;
688 }
689 }
690
691 cfg_val = mac->mlme_cfg->sap_cfg.rate_tx_mgmt_2g;
692 band = CDS_BAND_2GHZ;
693 if ((cfg_val == MLME_CFG_TX_MGMT_2G_RATE_DEF) ||
694 !wma_verify_rate_code(cfg_val, band)) {
695 wma_nofl_debug("use default 2G MGMT rate.");
696 per_band_mgmt_tx_rate &=
697 ~(1 << TX_MGMT_RATE_2G_ENABLE_OFFSET);
698 } else {
699 per_band_mgmt_tx_rate |=
700 (1 << TX_MGMT_RATE_2G_ENABLE_OFFSET);
701 per_band_mgmt_tx_rate |=
702 ((cfg_val & 0x7FF) << TX_MGMT_RATE_2G_OFFSET);
703 }
704
705 	cfg_val = mac->mlme_cfg->sap_cfg.rate_tx_mgmt_5g;
706 band = CDS_BAND_5GHZ;
707 if ((cfg_val == MLME_CFG_TX_MGMT_5G_RATE_DEF) ||
708 !wma_verify_rate_code(cfg_val, band)) {
709 wma_nofl_debug("use default 5G MGMT rate.");
710 per_band_mgmt_tx_rate &=
711 ~(1 << TX_MGMT_RATE_5G_ENABLE_OFFSET);
712 } else {
713 per_band_mgmt_tx_rate |=
714 (1 << TX_MGMT_RATE_5G_ENABLE_OFFSET);
715 per_band_mgmt_tx_rate |=
716 ((cfg_val & 0x7FF) << TX_MGMT_RATE_5G_OFFSET);
717 }
718
719 status = mlme_check_index_setparam(setparam,
720 wmi_vdev_param_per_band_mgmt_tx_rate,
721 per_band_mgmt_tx_rate, index++,
722 MAX_VDEV_MGMT_RATE_PARAMS);
723 if (QDF_IS_STATUS_ERROR(status)) {
724 wma_err("failed at wmi_vdev_param_per_band_mgmt_tx_rate");
725 goto error;
726 }
727
728 status = wma_send_multi_pdev_vdev_set_params(MLME_VDEV_SETPARAM,
729 vdev_id, setparam, index);
730 if (QDF_IS_STATUS_ERROR(status))
731 wma_debug("failed to send MGMT_TX_RATE vdev set params stat:%d",
732 status);
733 error:
734 return;
735 }
736
737 #define MAX_VDEV_SAP_KEEPALIVE_PARAMS 3
738 /* params being sent:
739 * wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs
740 * wmi_vdev_param_ap_keepalive_max_idle_inactive_time_secs
741 * wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs
742 */
743
744 /**
745 * wma_set_sap_keepalive() - set SAP keep alive parameters to fw
746 * @wma: wma handle
747 * @vdev_id: vdev id
748 *
749 * Return: none
750 */
751 void wma_set_sap_keepalive(tp_wma_handle wma, uint8_t vdev_id)
752 {
753 uint32_t min_inactive_time, max_inactive_time, max_unresponsive_time;
754 struct mac_context *mac = cds_get_context(QDF_MODULE_ID_PE);
755 QDF_STATUS status;
756 struct dev_set_param setparam[MAX_VDEV_SAP_KEEPALIVE_PARAMS] = {};
757 uint8_t index = 0;
758
759 if (!mac) {
760 wma_err("Failed to get mac");
761 return;
762 }
763
764 wma_get_link_probe_timeout(mac, wma->interfaces[vdev_id].sub_type,
765 &max_inactive_time, &max_unresponsive_time);
766
767 min_inactive_time = max_inactive_time / 2;
768 status = mlme_check_index_setparam(
769 setparam,
770 wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs,
771 min_inactive_time, index++,
772 MAX_VDEV_SAP_KEEPALIVE_PARAMS);
773 if (QDF_IS_STATUS_ERROR(status)) {
774 wma_err("failed to set wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs");
775 goto error;
776 }
777 status = mlme_check_index_setparam(
778 setparam,
779 wmi_vdev_param_ap_keepalive_max_idle_inactive_time_secs,
780 max_inactive_time, index++,
781 MAX_VDEV_SAP_KEEPALIVE_PARAMS);
782 if (QDF_IS_STATUS_ERROR(status)) {
783 wma_err("failed to set wmi_vdev_param_ap_keepalive_max_idle_inactive_time_secs");
784 goto error;
785 }
786 status = mlme_check_index_setparam(
787 setparam,
788 wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs,
789 max_unresponsive_time, index++,
790 MAX_VDEV_SAP_KEEPALIVE_PARAMS);
791 if (QDF_IS_STATUS_ERROR(status)) {
792 wma_err("failed to set wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs");
793 goto error;
794 }
795
796 status = wma_send_multi_pdev_vdev_set_params(MLME_VDEV_SETPARAM,
797 vdev_id, setparam, index);
798 if (QDF_IS_STATUS_ERROR(status))
799 wma_err("Failed to Set AP MIN/MAX IDLE INACTIVE TIME, MAX UNRESPONSIVE TIME:%d", status);
800 else
801 wma_debug("vdev_id:%d min_inactive_time: %u max_inactive_time: %u max_unresponsive_time: %u",
802 vdev_id, min_inactive_time, max_inactive_time,
803 max_unresponsive_time);
804 error:
805 return;
806 }
807
808 /**
809 * wma_set_sta_sa_query_param() - set sta sa query parameters
810 * @wma: wma handle
811 * @vdev_id: vdev id
812  *
813  * This function sets the STA SA query related parameters in fw.
814 *
815 * Return: none
816 */
817
818 void wma_set_sta_sa_query_param(tp_wma_handle wma,
819 uint8_t vdev_id)
820 {
821 struct mac_context *mac = cds_get_context(QDF_MODULE_ID_PE);
822 uint8_t max_retries;
823 uint16_t retry_interval;
824
825 if (!mac) {
826 wma_err("mac context is NULL");
827 return;
828 }
829
830 max_retries = mac->mlme_cfg->gen.pmf_sa_query_max_retries;
831 retry_interval = mac->mlme_cfg->gen.pmf_sa_query_retry_interval;
832
833 wmi_unified_set_sta_sa_query_param_cmd(wma->wmi_handle,
834 vdev_id,
835 max_retries,
836 retry_interval);
837 }
838
839 /**
840 * wma_set_sta_keep_alive() - set sta keep alive parameters
841 * @wma: wma handle
842 * @vdev_id: vdev id
843 * @method: method for keep alive
844 * @timeperiod: time period
845 * @hostv4addr: host ipv4 address
846 * @destv4addr: dst ipv4 address
847 * @destmac: destination mac
848 *
849 * This function sets keep alive related parameters in fw.
850 *
851 * Return: none
852 */
853 void wma_set_sta_keep_alive(tp_wma_handle wma, uint8_t vdev_id,
854 uint32_t method, uint32_t timeperiod,
855 uint8_t *hostv4addr, uint8_t *destv4addr,
856 uint8_t *destmac)
857 {
858 struct sta_keep_alive_params params = { 0 };
859 struct wma_txrx_node *intr;
860
861 if (wma_validate_handle(wma))
862 return;
863
864 intr = &wma->interfaces[vdev_id];
865 if (timeperiod > cfg_max(CFG_INFRA_STA_KEEP_ALIVE_PERIOD)) {
866 wmi_err("Invalid period %d Max limit %d", timeperiod,
867 cfg_max(CFG_INFRA_STA_KEEP_ALIVE_PERIOD));
868 return;
869 }
870
871 params.vdev_id = vdev_id;
872 params.method = method;
873 params.timeperiod = timeperiod;
874 if (intr) {
875 if (intr->bss_max_idle_period) {
876 if (intr->bss_max_idle_period < timeperiod)
877 params.timeperiod = intr->bss_max_idle_period;
878
879 if (method == WMI_KEEP_ALIVE_NULL_PKT)
880 params.method = WMI_KEEP_ALIVE_MGMT_FRAME;
881 }
882
883 wlan_mlme_set_keepalive_period(intr->vdev, params.timeperiod);
884 }
885
886 if (hostv4addr)
887 qdf_mem_copy(params.hostv4addr, hostv4addr, QDF_IPV4_ADDR_SIZE);
888 if (destv4addr)
889 qdf_mem_copy(params.destv4addr, destv4addr, QDF_IPV4_ADDR_SIZE);
890 if (destmac)
891 qdf_mem_copy(params.destmac, destmac, QDF_MAC_ADDR_SIZE);
892
893 	wmi_unified_set_sta_keep_alive_cmd(wma->wmi_handle, &params);
894 }
895
896 /*
897 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
898 * 0 for no restriction
899 * 1 for 1/4 us - Our lower layer calculations limit our precision to 1 msec
900 * 2 for 1/2 us - Our lower layer calculations limit our precision to 1 msec
901 * 3 for 1 us
902 * 4 for 2 us
903 * 5 for 4 us
904 * 6 for 8 us
905 * 7 for 16 us
906 */
907 static const uint8_t wma_mpdu_spacing[] = { 0, 1, 1, 1, 2, 4, 8, 16 };
908
909 /**
910 * wma_parse_mpdudensity() - give mpdu spacing from mpdu density
911 * @mpdudensity: mpdu density
912 *
913 * Return: mpdu spacing or 0 for error
914 */
915 static inline uint8_t wma_parse_mpdudensity(uint8_t mpdudensity)
916 {
917 if (mpdudensity < sizeof(wma_mpdu_spacing))
918 return wma_mpdu_spacing[mpdudensity];
919 else
920 return 0;
921 }
922
923 #define CFG_CTRL_MASK 0xFF00
924 #define CFG_DATA_MASK 0x00FF
925
926 /**
927 * wma_mask_tx_ht_rate() - mask tx ht rate based on config
928 * @wma: wma handle
929  * @mcs_set: mcs set buffer
930 *
931 * Return: None
932 */
933 static void wma_mask_tx_ht_rate(tp_wma_handle wma, uint8_t *mcs_set)
934 {
935 uint32_t i, j;
936 uint16_t mcs_limit;
937 uint8_t *rate_pos = mcs_set;
938 struct mac_context *mac = wma->mac_context;
939
940 /*
941 * Get MCS limit from ini configure, and map it to rate parameters
942 * This will limit HT rate upper bound. CFG_CTRL_MASK is used to
943 * check whether ini config is enabled and CFG_DATA_MASK to get the
944 * MCS value.
945 */
946 mcs_limit = mac->mlme_cfg->rates.max_htmcs_txdata;
947
948 if (mcs_limit & CFG_CTRL_MASK) {
949 wma_debug("set mcs_limit %x", mcs_limit);
950
951 mcs_limit &= CFG_DATA_MASK;
952 for (i = 0, j = 0; i < MAX_SUPPORTED_RATES;) {
953 if (j < mcs_limit / 8) {
954 rate_pos[j] = 0xff;
955 j++;
956 i += 8;
957 } else if (j < mcs_limit / 8 + 1) {
958 if (i <= mcs_limit)
959 rate_pos[i / 8] |= 1 << (i % 8);
960 else
961 rate_pos[i / 8] &= ~(1 << (i % 8));
962 i++;
963
964 if (i >= (j + 1) * 8)
965 j++;
966 } else {
967 rate_pos[j++] = 0;
968 i += 8;
969 }
970 }
971 }
972 }
973
974 #if SUPPORT_11AX
975 /**
976 * wma_fw_to_host_phymode_11ax() - convert fw to host phymode for 11ax phymodes
977 * @phymode: phymode to convert
978 *
979 * Return: one of the 11ax values defined in enum wlan_phymode;
980 * or WLAN_PHYMODE_AUTO if the input is not an 11ax phymode
981 */
982 static enum wlan_phymode
983 wma_fw_to_host_phymode_11ax(WMI_HOST_WLAN_PHY_MODE phymode)
984 {
985 switch (phymode) {
986 default:
987 return WLAN_PHYMODE_AUTO;
988 case WMI_HOST_MODE_11AX_HE20:
989 return WLAN_PHYMODE_11AXA_HE20;
990 case WMI_HOST_MODE_11AX_HE40:
991 return WLAN_PHYMODE_11AXA_HE40;
992 case WMI_HOST_MODE_11AX_HE80:
993 return WLAN_PHYMODE_11AXA_HE80;
994 case WMI_HOST_MODE_11AX_HE80_80:
995 return WLAN_PHYMODE_11AXA_HE80_80;
996 case WMI_HOST_MODE_11AX_HE160:
997 return WLAN_PHYMODE_11AXA_HE160;
998 case WMI_HOST_MODE_11AX_HE20_2G:
999 return WLAN_PHYMODE_11AXG_HE20;
1000 case WMI_HOST_MODE_11AX_HE40_2G:
1001 return WLAN_PHYMODE_11AXG_HE40;
1002 case WMI_HOST_MODE_11AX_HE80_2G:
1003 return WLAN_PHYMODE_11AXG_HE80;
1004 }
1005 return WLAN_PHYMODE_AUTO;
1006 }
1007 #else
1008 static enum wlan_phymode
1009 wma_fw_to_host_phymode_11ax(WMI_HOST_WLAN_PHY_MODE phymode)
1010 {
1011 return WLAN_PHYMODE_AUTO;
1012 }
1013 #endif
1014
1015 #ifdef WLAN_FEATURE_11BE
1016 /**
1017 * wma_fw_to_host_phymode_11be() - convert fw to host phymode for 11be phymodes
1018 * @phymode: phymode to convert
1019 *
1020 * Return: one of the 11be values defined in enum wlan_phymode;
1021 * or WLAN_PHYMODE_AUTO if the input is not an 11be phymode
1022 */
1023 static enum wlan_phymode
1024 wma_fw_to_host_phymode_11be(WMI_HOST_WLAN_PHY_MODE phymode)
1025 {
1026 switch (phymode) {
1027 default:
1028 return WLAN_PHYMODE_AUTO;
1029 case WMI_HOST_MODE_11BE_EHT20:
1030 return WLAN_PHYMODE_11BEA_EHT20;
1031 case WMI_HOST_MODE_11BE_EHT40:
1032 return WLAN_PHYMODE_11BEA_EHT40;
1033 case WMI_HOST_MODE_11BE_EHT80:
1034 return WLAN_PHYMODE_11BEA_EHT80;
1035 case WMI_HOST_MODE_11BE_EHT160:
1036 return WLAN_PHYMODE_11BEA_EHT160;
1037 case WMI_HOST_MODE_11BE_EHT320:
1038 return WLAN_PHYMODE_11BEA_EHT320;
1039 case WMI_HOST_MODE_11BE_EHT20_2G:
1040 return WLAN_PHYMODE_11BEG_EHT20;
1041 case WMI_HOST_MODE_11BE_EHT40_2G:
1042 return WLAN_PHYMODE_11BEG_EHT40;
1043 }
1044 return WLAN_PHYMODE_AUTO;
1045 }
1046
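/**
 * wma_is_phymode_eht() - check if phymode is an EHT phymode
 * @phymode: phymode to check
 *
 * Return: true if @phymode is an 11be (EHT) phymode, false otherwise
 */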
1047 static inline bool wma_is_phymode_eht(enum wlan_phymode phymode)
1048 {
1049 return IS_WLAN_PHYMODE_EHT(phymode);
1050 }
1051 #else
1052 static enum wlan_phymode
1053 wma_fw_to_host_phymode_11be(WMI_HOST_WLAN_PHY_MODE phymode)
1054 {
1055 return WLAN_PHYMODE_AUTO;
1056 }
1057
1058 static inline bool wma_is_phymode_eht(enum wlan_phymode phymode)
1059 {
1060 return false;
1061 }
1062 #endif
1063
1064 #ifdef CONFIG_160MHZ_SUPPORT
1065 /**
1066 * wma_fw_to_host_phymode_160() - convert fw to host phymode for 160 mhz
1067 * phymodes
1068 * @phymode: phymode to convert
1069 *
1070 * Return: one of the 160 mhz values defined in enum wlan_phymode;
1071 * or WLAN_PHYMODE_AUTO if the input is not a 160 mhz phymode
1072 */
1073 static enum wlan_phymode
1074 wma_fw_to_host_phymode_160(WMI_HOST_WLAN_PHY_MODE phymode)
1075 {
1076 switch (phymode) {
1077 default:
1078 return WLAN_PHYMODE_AUTO;
1079 case WMI_HOST_MODE_11AC_VHT80_80:
1080 return WLAN_PHYMODE_11AC_VHT80_80;
1081 case WMI_HOST_MODE_11AC_VHT160:
1082 return WLAN_PHYMODE_11AC_VHT160;
1083 }
1084 }
1085 #else
1086 static enum wlan_phymode
1087 wma_fw_to_host_phymode_160(WMI_HOST_WLAN_PHY_MODE phymode)
1088 {
1089 return WLAN_PHYMODE_AUTO;
1090 }
1091 #endif
1092
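/**
 * wma_fw_to_host_phymode() - convert fw to host phymode
 * @phymode: firmware phymode to convert
 *
 * Return: one of the values defined in enum wlan_phymode;
 *         or WLAN_PHYMODE_AUTO if the conversion is not possible
 */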
1093 enum wlan_phymode wma_fw_to_host_phymode(WMI_HOST_WLAN_PHY_MODE phymode)
1094 {
1095 enum wlan_phymode host_phymode;
1096 switch (phymode) {
1097 default:
1098 host_phymode = wma_fw_to_host_phymode_160(phymode);
1099 if (host_phymode != WLAN_PHYMODE_AUTO)
1100 return host_phymode;
1101 host_phymode = wma_fw_to_host_phymode_11ax(phymode);
1102 if (host_phymode != WLAN_PHYMODE_AUTO)
1103 return host_phymode;
1104 return wma_fw_to_host_phymode_11be(phymode);
1105 case WMI_HOST_MODE_11A:
1106 return WLAN_PHYMODE_11A;
1107 case WMI_HOST_MODE_11G:
1108 return WLAN_PHYMODE_11G;
1109 case WMI_HOST_MODE_11B:
1110 return WLAN_PHYMODE_11B;
1111 case WMI_HOST_MODE_11GONLY:
1112 return WLAN_PHYMODE_11G_ONLY;
1113 case WMI_HOST_MODE_11NA_HT20:
1114 return WLAN_PHYMODE_11NA_HT20;
1115 case WMI_HOST_MODE_11NG_HT20:
1116 return WLAN_PHYMODE_11NG_HT20;
1117 case WMI_HOST_MODE_11NA_HT40:
1118 return WLAN_PHYMODE_11NA_HT40;
1119 case WMI_HOST_MODE_11NG_HT40:
1120 return WLAN_PHYMODE_11NG_HT40;
1121 case WMI_HOST_MODE_11AC_VHT20:
1122 return WLAN_PHYMODE_11AC_VHT20;
1123 case WMI_HOST_MODE_11AC_VHT40:
1124 return WLAN_PHYMODE_11AC_VHT40;
1125 case WMI_HOST_MODE_11AC_VHT80:
1126 return WLAN_PHYMODE_11AC_VHT80;
1127 case WMI_HOST_MODE_11AC_VHT20_2G:
1128 return WLAN_PHYMODE_11AC_VHT20_2G;
1129 case WMI_HOST_MODE_11AC_VHT40_2G:
1130 return WLAN_PHYMODE_11AC_VHT40_2G;
1131 case WMI_HOST_MODE_11AC_VHT80_2G:
1132 return WLAN_PHYMODE_11AC_VHT80_2G;
1133 }
1134 }
1135
1136 #ifdef WLAN_FEATURE_11BE
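/**
 * wma_populate_peer_puncture() - populate peer EHT puncture bitmap
 * @peer: peer assoc request parameters
 * @des_chan: vdev desired channel
 *
 * Return: None
 */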
1137 static void wma_populate_peer_puncture(struct peer_assoc_params *peer,
1138 struct wlan_channel *des_chan)
1139 {
1140 peer->puncture_bitmap = des_chan->puncture_bitmap;
1141 wma_debug("Peer EHT puncture bitmap %d", peer->puncture_bitmap);
1142 }
1143
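/**
 * wma_populate_peer_mlo_cap() - populate peer MLO assoc and partner link info
 * @peer: peer assoc request parameters
 * @params: add sta params
 *
 * Return: None
 */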
1144 static void wma_populate_peer_mlo_cap(struct peer_assoc_params *peer,
1145 tpAddStaParams params)
1146 {
1147 struct peer_assoc_ml_partner_links *ml_links;
1148 struct peer_assoc_mlo_params *mlo_params;
1149 struct peer_ml_info *ml_info;
1150 uint8_t i;
1151
1152 ml_info = ¶ms->ml_info;
1153 mlo_params = &peer->mlo_params;
1154 ml_links = &peer->ml_links;
1155
1156 /* Assoc link info */
1157 mlo_params->vdev_id = ml_info->vdev_id;
1158 mlo_params->ieee_link_id = ml_info->link_id;
1159 qdf_mem_copy(&mlo_params->chan, &ml_info->channel_info,
1160 sizeof(struct wlan_channel));
1161 qdf_mem_copy(&mlo_params->bssid, &ml_info->link_addr,
1162 QDF_MAC_ADDR_SIZE);
1163 qdf_mem_copy(&mlo_params->mac_addr, &ml_info->self_mac_addr,
1164 QDF_MAC_ADDR_SIZE);
1165
1166 mlo_params->rec_max_simultaneous_links =
1167 ml_info->rec_max_simultaneous_links;
1168
1169 /* Fill partner link info */
1170 ml_links->num_links = ml_info->num_links;
1171 for (i = 0; i < ml_links->num_links; i++) {
1172 ml_links->partner_info[i].vdev_id =
1173 ml_info->partner_info[i].vdev_id;
1174 ml_links->partner_info[i].link_id =
1175 ml_info->partner_info[i].link_id;
1176 qdf_mem_copy(&ml_links->partner_info[i].chan,
1177 &ml_info->partner_info[i].channel_info,
1178 sizeof(struct wlan_channel));
1179 qdf_mem_copy(&ml_links->partner_info[i].bssid,
1180 &ml_info->partner_info[i].link_addr,
1181 QDF_MAC_ADDR_SIZE);
1182 qdf_mem_copy(&ml_links->partner_info[i].mac_addr,
1183 &ml_info->partner_info[i].self_mac_addr,
1184 QDF_MAC_ADDR_SIZE);
1185 }
1186 }
1187 #else
1188 static void wma_populate_peer_puncture(struct peer_assoc_params *peer,
1189 struct wlan_channel *des_chan)
1190 {
1191 }
1192
1193 static void wma_populate_peer_mlo_cap(struct peer_assoc_params *peer,
1194 tpAddStaParams params)
1195 {
1196 }
1197 #endif
1198
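/**
 * wma_objmgr_set_peer_mlme_nss() - set nss to peer object
 * @wma: wma handle
 * @mac_addr: mac addr of peer
 * @nss: nss value to set
 *
 * Return: None
 */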
1199 void wma_objmgr_set_peer_mlme_nss(tp_wma_handle wma, uint8_t *mac_addr,
1200 uint8_t nss)
1201 {
1202 uint8_t pdev_id;
1203 struct wlan_objmgr_peer *peer;
1204 struct peer_mlme_priv_obj *peer_priv;
1205 struct wlan_objmgr_psoc *psoc = wma->psoc;
1206
1207 pdev_id = wlan_objmgr_pdev_get_pdev_id(wma->pdev);
1208 peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr,
1209 WLAN_LEGACY_WMA_ID);
1210 if (!peer)
1211 return;
1212
1213 peer_priv = wlan_objmgr_peer_get_comp_private_obj(peer,
1214 WLAN_UMAC_COMP_MLME);
1215 if (!peer_priv) {
1216 wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
1217 return;
1218 }
1219
1220 peer_priv->nss = nss;
1221 wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
1222 }
1223
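/**
 * wma_objmgr_get_peer_mlme_nss() - get nss from peer object
 * @wma: wma handle
 * @mac_addr: mac addr of peer
 *
 * Return: peer nss, or 0 if the peer cannot be found
 */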
1224 uint8_t wma_objmgr_get_peer_mlme_nss(tp_wma_handle wma, uint8_t *mac_addr)
1225 {
1226 uint8_t pdev_id;
1227 struct wlan_objmgr_peer *peer;
1228 struct peer_mlme_priv_obj *peer_priv;
1229 struct wlan_objmgr_psoc *psoc = wma->psoc;
1230 uint8_t nss;
1231
1232 pdev_id = wlan_objmgr_pdev_get_pdev_id(wma->pdev);
1233 peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr,
1234 WLAN_LEGACY_WMA_ID);
1235 if (!peer)
1236 return 0;
1237
1238 peer_priv = wlan_objmgr_peer_get_comp_private_obj(peer,
1239 WLAN_UMAC_COMP_MLME);
1240 if (!peer_priv) {
1241 wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
1242 return 0;
1243 }
1244
1245 nss = peer_priv->nss;
1246 wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
1247 return nss;
1248 }
1249
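/**
 * wma_objmgr_set_peer_mlme_phymode() - set phymode to peer object
 * @wma: wma handle
 * @mac_addr: mac addr of peer
 * @phymode: phymode value to set
 *
 * Return: None
 */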
1250 void wma_objmgr_set_peer_mlme_phymode(tp_wma_handle wma, uint8_t *mac_addr,
1251 enum wlan_phymode phymode)
1252 {
1253 uint8_t pdev_id;
1254 struct wlan_objmgr_peer *peer;
1255 struct wlan_objmgr_psoc *psoc = wma->psoc;
1256
1257 pdev_id = wlan_objmgr_pdev_get_pdev_id(wma->pdev);
1258 peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr,
1259 WLAN_LEGACY_WMA_ID);
1260 if (!peer)
1261 return;
1262
1263 wlan_peer_obj_lock(peer);
1264 wlan_peer_set_phymode(peer, phymode);
1265 wlan_peer_obj_unlock(peer);
1266 wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
1267 }
1268
1269 /**
1270 * wma_objmgr_set_peer_mlme_type() - set peer type to peer object
1271 * @wma: wma handle
1272 * @mac_addr: mac addr of peer
1273 * @peer_type: peer type value to set
1274 *
1275 * Return: None
1276 */
1277 static void wma_objmgr_set_peer_mlme_type(tp_wma_handle wma,
1278 uint8_t *mac_addr,
1279 enum wlan_peer_type peer_type)
1280 {
1281 uint8_t pdev_id;
1282 struct wlan_objmgr_peer *peer;
1283 struct wlan_objmgr_psoc *psoc = wma->psoc;
1284
1285 pdev_id = wlan_objmgr_pdev_get_pdev_id(wma->pdev);
1286 peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr,
1287 WLAN_LEGACY_WMA_ID);
1288 if (!peer)
1289 return;
1290
1291 wlan_peer_obj_lock(peer);
1292 wlan_peer_set_peer_type(peer, peer_type);
1293 wlan_peer_obj_unlock(peer);
1294 wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
1295 }
1296
1297 #ifdef WLAN_FEATURE_11BE_MLO
1298
1299 #define MIN_TIMEOUT_VAL 0
1300 #define MAX_TIMEOUT_VAL 11
1301
1302 #define TIMEOUT_TO_US 6
1303
1304 /**
1305  * wma_convert_trans_timeout_us() - convert EMLSR transition timeout
1306  * to microseconds. Refer to Table 9-401h of the IEEE 802.11be
1307  * specification.
1308 * @timeout: EMLSR transition timeout
1309 *
1310 * Return: Timeout value in microseconds
1311 */
1312 static inline uint32_t
1313 wma_convert_trans_timeout_us(uint16_t timeout)
1314 {
1315 uint32_t us = 0;
1316
1317 if (timeout > MIN_TIMEOUT_VAL && timeout < MAX_TIMEOUT_VAL) {
1318 /* timeout = 1 is for 128us*/
1319 us = (1 << (timeout + TIMEOUT_TO_US));
1320 }
1321
1322 return us;
1323 }
1324
1325 /**
1326 * wma_set_mlo_capability() - set MLO caps to the peer assoc request
1327 * @wma: wma handle
1328 * @vdev: vdev object
1329 * @params: Add sta params
1330 * @req: peer assoc request parameters
1331 *
1332 * Return: None
1333 */
1334 static void wma_set_mlo_capability(tp_wma_handle wma,
1335 struct wlan_objmgr_vdev *vdev,
1336 tpAddStaParams params,
1337 struct peer_assoc_params *req)
1338 {
1339 uint8_t pdev_id;
1340 struct wlan_objmgr_peer *peer;
1341 struct wlan_objmgr_psoc *psoc = wma->psoc;
1342 uint16_t link_id_bitmap;
1343
1344 pdev_id = wlan_objmgr_pdev_get_pdev_id(wma->pdev);
1345 peer = wlan_objmgr_get_peer(psoc, pdev_id, req->peer_mac,
1346 WLAN_LEGACY_WMA_ID);
1347
1348 if (!peer) {
1349 wma_err("peer not valid");
1350 return;
1351 }
1352
1353 if (!qdf_is_macaddr_zero((struct qdf_mac_addr *)peer->mldaddr)) {
1354 req->mlo_params.mlo_enabled = true;
1355 req->mlo_params.mlo_assoc_link =
1356 wlan_peer_mlme_is_assoc_peer(peer);
1357 WLAN_ADDR_COPY(req->mlo_params.mld_mac, peer->mldaddr);
1358 if (policy_mgr_ml_link_vdev_need_to_be_disabled(psoc, vdev,
1359 true) ||
1360 policy_mgr_is_emlsr_sta_concurrency_present(psoc)) {
1361 req->mlo_params.mlo_force_link_inactive = 1;
1362 link_id_bitmap = 1 << params->link_id;
1363 ml_nlink_set_curr_force_inactive_state(
1364 psoc, vdev, link_id_bitmap, LINK_ADD);
1365 ml_nlink_init_concurrency_link_request(psoc, vdev);
1366 }
1367 wma_debug("assoc_link %d" QDF_MAC_ADDR_FMT ", force inactive %d link id %d",
1368 req->mlo_params.mlo_assoc_link,
1369 QDF_MAC_ADDR_REF(peer->mldaddr),
1370 req->mlo_params.mlo_force_link_inactive,
1371 params->link_id);
1372
1373 req->mlo_params.emlsr_support = params->emlsr_support;
1374 req->mlo_params.ieee_link_id = params->link_id;
1375 if (req->mlo_params.emlsr_support) {
1376 req->mlo_params.trans_timeout_us =
1377 wma_convert_trans_timeout_us(params->emlsr_trans_timeout);
1378 }
1379 req->mlo_params.msd_cap_support = params->msd_caps_present;
1380 req->mlo_params.medium_sync_duration =
1381 params->msd_caps.med_sync_duration;
1382 req->mlo_params.medium_sync_ofdm_ed_thresh =
1383 params->msd_caps.med_sync_ofdm_ed_thresh;
1384 req->mlo_params.medium_sync_max_txop_num =
1385 params->msd_caps.med_sync_max_txop_num;
1386 req->mlo_params.link_switch_in_progress =
1387 wlan_vdev_mlme_is_mlo_link_switch_in_progress(vdev);
1388 /*
1389 * Set max simultaneous links = 1 for MLSR, 2 for MLMR. The +1
1390 * is added as per the agreement with FW for backward
1391 * compatibility purposes. Our internal structures still
1392 * conform to the values as per spec i.e. 0 = MLSR, 1 = MLMR.
1393 */
1394 req->mlo_params.max_num_simultaneous_links =
1395 wlan_mlme_get_sta_mlo_simultaneous_links(psoc) + 1;
1396 } else {
1397 wma_debug("Peer MLO context is NULL");
1398 req->mlo_params.mlo_enabled = false;
1399 req->mlo_params.emlsr_support = false;
1400 }
1401 wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
1402 }
1403
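/**
 * wma_set_mlo_assoc_vdev() - mark the request as coming from the assoc vdev
 * @vdev: vdev object
 * @req: peer assoc request parameters
 *
 * Return: None
 */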
1404 static void wma_set_mlo_assoc_vdev(struct wlan_objmgr_vdev *vdev,
1405 struct peer_assoc_params *req)
1406 {
1407 if (wlan_vdev_mlme_is_mlo_vdev(vdev) &&
1408 !wlan_vdev_mlme_is_mlo_link_vdev(vdev))
1409 req->is_assoc_vdev = true;
1410 }
1411 #else
1412 static inline void wma_set_mlo_capability(tp_wma_handle wma,
1413 struct wlan_objmgr_vdev *vdev,
1414 tpAddStaParams params,
1415 struct peer_assoc_params *req)
1416 {
1417 }
1418
1419 static inline void wma_set_mlo_assoc_vdev(struct wlan_objmgr_vdev *vdev,
1420 struct peer_assoc_params *req)
1421 {
1422 }
1423 #endif
1424
1425 /**
1426  * wma_send_peer_assoc() - send peer assoc command to fw
1427 * @wma: wma handle
1428 * @nw_type: nw type
1429 * @params: add sta params
1430 *
1431 * This function send peer assoc command to firmware with
1432 * different parameters.
1433 *
1434 * Return: QDF_STATUS
1435 */
1436 QDF_STATUS wma_send_peer_assoc(tp_wma_handle wma,
1437 tSirNwType nw_type,
1438 tpAddStaParams params)
1439 {
1440 struct peer_assoc_params *cmd;
1441 int32_t ret, max_rates, i;
1442 uint8_t *rate_pos;
1443 wmi_rate_set peer_legacy_rates, peer_ht_rates;
1444 uint32_t num_peer_11b_rates = 0;
1445 uint32_t num_peer_11a_rates = 0;
1446 enum wlan_phymode phymode, vdev_phymode;
1447 uint32_t peer_nss = 1;
1448 struct wma_txrx_node *intr = NULL;
1449 bool is_he;
1450 bool is_eht;
1451 QDF_STATUS status;
1452 struct mac_context *mac = wma->mac_context;
1453 struct wlan_channel *des_chan;
1454 int32_t keymgmt, uccipher, authmode;
1455
1456 cmd = qdf_mem_malloc(sizeof(struct peer_assoc_params));
1457 if (!cmd) {
1458 wma_err("Failed to allocate peer_assoc_params param");
1459 return QDF_STATUS_E_NOMEM;
1460 }
1461
1462 intr = &wma->interfaces[params->smesessionId];
1463
1464 wma_mask_tx_ht_rate(wma, params->supportedRates.supportedMCSSet);
1465
1466 qdf_mem_zero(&peer_legacy_rates, sizeof(wmi_rate_set));
1467 qdf_mem_zero(&peer_ht_rates, sizeof(wmi_rate_set));
1468 qdf_mem_zero(cmd, sizeof(struct peer_assoc_params));
1469
1470 is_he = wma_is_peer_he_capable(params);
1471 is_eht = wma_is_peer_eht_capable(params);
1472 if ((params->ch_width > CH_WIDTH_40MHZ) &&
1473 ((nw_type == eSIR_11G_NW_TYPE) ||
1474 (nw_type == eSIR_11B_NW_TYPE))) {
1475 wma_err("ch_width %d sent in 11G, configure to 40MHz",
1476 params->ch_width);
1477 params->ch_width = CH_WIDTH_40MHZ;
1478 }
1479 phymode = wma_peer_phymode(nw_type, params->staType,
1480 params->htCapable, params->ch_width,
1481 params->vhtCapable, is_he, is_eht);
1482
1483 des_chan = wlan_vdev_mlme_get_des_chan(intr->vdev);
1484 vdev_phymode = des_chan->ch_phymode;
1485 if ((intr->type == WMI_VDEV_TYPE_AP) && (phymode > vdev_phymode)) {
1486 wma_nofl_debug("Peer phymode %d is not allowed. Set it equal to sap/go phymode %d",
1487 phymode, vdev_phymode);
1488 phymode = vdev_phymode;
1489 }
1490
1491 if (!mac->mlme_cfg->rates.disable_abg_rate_txdata &&
1492 !WLAN_REG_IS_6GHZ_CHAN_FREQ(des_chan->ch_freq)) {
1493 /* Legacy Rateset */
1494 rate_pos = (uint8_t *) peer_legacy_rates.rates;
1495 for (i = 0; i < SIR_NUM_11B_RATES; i++) {
1496 if (!params->supportedRates.llbRates[i])
1497 continue;
1498 rate_pos[peer_legacy_rates.num_rates++] =
1499 params->supportedRates.llbRates[i];
1500 num_peer_11b_rates++;
1501 }
1502 for (i = 0; i < SIR_NUM_11A_RATES; i++) {
1503 if (!params->supportedRates.llaRates[i])
1504 continue;
1505 rate_pos[peer_legacy_rates.num_rates++] =
1506 params->supportedRates.llaRates[i];
1507 num_peer_11a_rates++;
1508 }
1509 }
1510
1511 if ((phymode == WLAN_PHYMODE_11A && num_peer_11a_rates == 0) ||
1512 (phymode == WLAN_PHYMODE_11B && num_peer_11b_rates == 0)) {
1513 wma_warn("Invalid phy rates. phymode 0x%x, 11b_rates %d, 11a_rates %d",
1514 phymode, num_peer_11b_rates,
1515 num_peer_11a_rates);
1516 qdf_mem_free(cmd);
1517 return QDF_STATUS_E_INVAL;
1518 }
1519
1520 /* HT Rateset */
1521 max_rates = sizeof(peer_ht_rates.rates) /
1522 sizeof(peer_ht_rates.rates[0]);
1523 rate_pos = (uint8_t *) peer_ht_rates.rates;
1524 for (i = 0; i < MAX_SUPPORTED_RATES; i++) {
1525 if (params->supportedRates.supportedMCSSet[i / 8] &
1526 (1 << (i % 8))) {
1527 rate_pos[peer_ht_rates.num_rates++] = i;
1528 if (i >= 8) {
1529 /* MCS8 or higher rate is present, must be 2x2 */
1530 peer_nss = 2;
1531 }
1532 }
1533 if (peer_ht_rates.num_rates == max_rates)
1534 break;
1535 }
1536
1537 if (params->htCapable && !peer_ht_rates.num_rates) {
1538 uint8_t temp_ni_rates[8] = { 0x0, 0x1, 0x2, 0x3,
1539 0x4, 0x5, 0x6, 0x7};
1540 /*
1541 * Workaround for EV 116382: The peer is marked HT but with
1542 * supported rx mcs set is set to 0. 11n spec mandates MCS0-7
1543 * for a HT STA. So forcing the supported rx mcs rate to
1544 * MCS 0-7. This workaround will be removed once we get
1545 * clarification from WFA regarding this STA behavior.
1546 */
1547
1548 /* TODO: Do we really need this? */
1549 wma_warn("Peer is marked as HT capable but supported mcs rate is 0");
1550 peer_ht_rates.num_rates = sizeof(temp_ni_rates);
1551 qdf_mem_copy((uint8_t *) peer_ht_rates.rates, temp_ni_rates,
1552 peer_ht_rates.num_rates);
1553 }
1554
1555 /* in ap mode and for tdls peer, use mac address of the peer in
1556 * the other end as the new peer address; in sta mode, use bss id to
1557 * be the new peer address
1558 */
1559 if ((wma_is_vdev_in_ap_mode(wma, params->smesessionId))
1560 #ifdef FEATURE_WLAN_TDLS
1561 || (STA_ENTRY_TDLS_PEER == params->staType)
1562 #endif /* FEATURE_WLAN_TDLS */
1563 ) {
1564 qdf_mem_copy(cmd->peer_mac, params->staMac,
1565 sizeof(cmd->peer_mac));
1566 } else {
1567 qdf_mem_copy(cmd->peer_mac, params->bssId,
1568 sizeof(cmd->peer_mac));
1569 }
1570 wma_objmgr_set_peer_mlme_phymode(wma, cmd->peer_mac, phymode);
1571
1572 cmd->vdev_id = params->smesessionId;
1573 cmd->peer_new_assoc = 1;
1574 cmd->peer_associd = params->assocId;
1575
1576 cmd->is_wme_set = 1;
1577
1578 if (params->wmmEnabled)
1579 cmd->qos_flag = 1;
1580
1581 if (params->uAPSD) {
1582 cmd->apsd_flag = 1;
1583 wma_nofl_debug("Set WMI_PEER_APSD: uapsd Mask %d",
1584 params->uAPSD);
1585 }
1586
1587 if (params->htCapable) {
1588 cmd->ht_flag = 1;
1589 cmd->qos_flag = 1;
1590 cmd->peer_rate_caps |= WMI_RC_HT_FLAG;
1591 }
1592
1593 if (params->vhtCapable) {
1594 cmd->ht_flag = 1;
1595 cmd->qos_flag = 1;
1596 cmd->vht_flag = 1;
1597 cmd->peer_rate_caps |= WMI_RC_HT_FLAG;
1598 }
1599
1600 if (params->ch_width) {
1601 cmd->peer_rate_caps |= WMI_RC_CW40_FLAG;
1602 if (params->fShortGI40Mhz)
1603 cmd->peer_rate_caps |= WMI_RC_SGI_FLAG;
1604 } else if (params->fShortGI20Mhz) {
1605 cmd->peer_rate_caps |= WMI_RC_SGI_FLAG;
1606 }
1607
1608 switch (params->ch_width) {
1609 case CH_WIDTH_320MHZ:
1610 wma_set_peer_assoc_params_bw_320(cmd, params->ch_width);
1611 fallthrough;
1612 case CH_WIDTH_80P80MHZ:
1613 case CH_WIDTH_160MHZ:
1614 cmd->bw_160 = 1;
1615 fallthrough;
1616 case CH_WIDTH_80MHZ:
1617 cmd->bw_80 = 1;
1618 fallthrough;
1619 case CH_WIDTH_40MHZ:
1620 cmd->bw_40 = 1;
1621 fallthrough;
1622 default:
1623 break;
1624 }
1625
1626 cmd->peer_vht_caps = params->vht_caps;
1627 if (params->p2pCapableSta) {
1628 cmd->p2p_capable_sta = 1;
1629 wma_objmgr_set_peer_mlme_type(wma, params->staMac,
1630 WLAN_PEER_P2P_CLI);
1631 }
1632
1633 if (params->rmfEnabled)
1634 cmd->is_pmf_enabled = 1;
1635
1636 if (params->stbc_capable)
1637 cmd->stbc_flag = 1;
1638
1639 if (params->htLdpcCapable || params->vhtLdpcCapable)
1640 cmd->ldpc_flag = 1;
1641
1642 switch (params->mimoPS) {
1643 case eSIR_HT_MIMO_PS_STATIC:
1644 cmd->static_mimops_flag = 1;
1645 break;
1646 case eSIR_HT_MIMO_PS_DYNAMIC:
1647 cmd->dynamic_mimops_flag = 1;
1648 break;
1649 case eSIR_HT_MIMO_PS_NO_LIMIT:
1650 cmd->spatial_mux_flag = 1;
1651 break;
1652 default:
1653 break;
1654 }
1655
1656 wma_set_twt_peer_caps(params, cmd);
1657 #ifdef FEATURE_WLAN_TDLS
1658 if (STA_ENTRY_TDLS_PEER == params->staType)
1659 cmd->auth_flag = 1;
1660 #endif /* FEATURE_WLAN_TDLS */
1661
1662 if (params->wpa_rsn
1663 #ifdef FEATURE_WLAN_WAPI
1664 || params->encryptType == eSIR_ED_WPI
1665 #endif /* FEATURE_WLAN_WAPI */
1666 ) {
1667 if (!params->no_ptk_4_way) {
1668 cmd->need_ptk_4_way = 1;
1669 wlan_acquire_peer_key_wakelock(wma->pdev,
1670 cmd->peer_mac);
1671 }
1672 }
1673
1674 if (params->wpa_rsn >> 1)
1675 cmd->need_gtk_2_way = 1;
1676
1677 #ifdef FEATURE_WLAN_WAPI
1678 if (params->encryptType == eSIR_ED_WPI) {
1679 ret = wma_vdev_set_param(wma->wmi_handle, params->smesessionId,
1680 wmi_vdev_param_drop_unencry, false);
1681 if (ret) {
1682 wma_err("Set wmi_vdev_param_drop_unencry Param status:%d",
1683 ret);
1684 qdf_mem_free(cmd);
1685 return ret;
1686 }
1687 }
1688 #endif /* FEATURE_WLAN_WAPI */
1689
1690 cmd->peer_caps = params->capab_info;
1691 cmd->peer_listen_intval = params->listenInterval;
1692 cmd->peer_ht_caps = params->ht_caps;
1693 cmd->peer_max_mpdu = (1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
1694 params->maxAmpduSize)) - 1;
1695 cmd->peer_mpdu_density = wma_parse_mpdudensity(params->maxAmpduDensity);
1696
1697 if (params->supportedRates.supportedMCSSet[1] &&
1698 params->supportedRates.supportedMCSSet[2])
1699 cmd->peer_rate_caps |= WMI_RC_TS_FLAG;
1700 else if (params->supportedRates.supportedMCSSet[1])
1701 cmd->peer_rate_caps |= WMI_RC_DS_FLAG;
1702
1703 /* Update peer legacy rate information */
1704 cmd->peer_legacy_rates.num_rates = peer_legacy_rates.num_rates;
1705 qdf_mem_copy(cmd->peer_legacy_rates.rates, peer_legacy_rates.rates,
1706 peer_legacy_rates.num_rates);
1707
1708 /* Update peer HT rate information */
1709 cmd->peer_ht_rates.num_rates = peer_ht_rates.num_rates;
1710 qdf_mem_copy(cmd->peer_ht_rates.rates, peer_ht_rates.rates,
1711 peer_ht_rates.num_rates);
1712
1713 /* VHT Rates */
1714
1715 cmd->peer_nss = peer_nss;
1716 /*
1717 * Because of DBS, a vdev may come up on either MAC, each with
1718 * different capabilities. STBC capability should be fetched for the
1719 * given hw_mode->MAC_id combination. Firmware is expected to provide
1720 * these per-MAC capabilities eventually; for now the number of tx
1721 * streams is used to decide whether Tx STBC must be disabled.
1722 */
1723 if (intr->tx_streams < 2) {
1724 cmd->peer_vht_caps &= ~(1 << SIR_MAC_VHT_CAP_TXSTBC);
1725 wma_nofl_debug("Num tx_streams: %d, Disabled txSTBC",
1726 intr->tx_streams);
1727 }
1728
1729 cmd->vht_capable = params->vhtCapable;
1730 if (params->vhtCapable) {
1731 #define VHT2x2MCSMASK 0xc
1732 cmd->rx_max_rate = params->supportedRates.vhtRxHighestDataRate;
1733 cmd->rx_mcs_set = params->supportedRates.vhtRxMCSMap;
1734 cmd->tx_max_rate = params->supportedRates.vhtTxHighestDataRate;
1735 cmd->tx_mcs_set = params->supportedRates.vhtTxMCSMap;
1736 /*
1737 * tx_mcs_set is intersection of self tx NSS and peer rx mcs map
1738 */
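/*
 * If the peer's Rx NSS is not provided, derive it from the VHT MCS
 * map: each spatial stream uses two bits and 0x3 means "not
 * supported", so bits [3:2] equal to VHT2x2MCSMASK indicate a 1x1
 * peer; otherwise assume 2x2.
 */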
1739 if (params->vhtSupportedRxNss)
1740 cmd->peer_nss = params->vhtSupportedRxNss;
1741 else
1742 cmd->peer_nss = ((cmd->tx_mcs_set & VHT2x2MCSMASK)
1743 == VHT2x2MCSMASK) ? 1 : 2;
1744
1745 if (params->vht_mcs_10_11_supp) {
1746 WMI_SET_BITS(cmd->tx_mcs_set, 16, cmd->peer_nss,
1747 ((1 << cmd->peer_nss) - 1));
1748 WMI_VHT_MCS_NOTIFY_EXT_SS_SET(cmd->tx_mcs_set, 1);
1749 }
1750 if (params->vht_extended_nss_bw_cap &&
1751 (params->vht_160mhz_nss || params->vht_80p80mhz_nss)) {
1752 /*
1753 * bit[2:0] : Represents value of Rx NSS for 160 MHz
1754 * bit[5:3] : Represents value of Rx NSS for 80_80 MHz
1755 * Extended NSS support
1756 * bit[30:6]: Reserved
1757 * bit[31] : MSB(0/1): 1 in case of valid data
1758 */
1759 cmd->peer_bw_rxnss_override |= (1 << 31);
1760 if (params->vht_160mhz_nss)
1761 cmd->peer_bw_rxnss_override |=
1762 (params->vht_160mhz_nss - 1);
1763 if (params->vht_80p80mhz_nss)
1764 cmd->peer_bw_rxnss_override |=
1765 ((params->vht_80p80mhz_nss - 1) << 3);
1766 wma_debug("peer_bw_rxnss_override %0X",
1767 cmd->peer_bw_rxnss_override);
1768 }
1769 }
1770
1771 wma_set_mlo_capability(wma, intr->vdev, params, cmd);
1772
1773 wma_set_mlo_assoc_vdev(intr->vdev, cmd);
1774
1775 wma_debug("rx_max_rate %d, rx_mcs %x, tx_max_rate %d, tx_mcs: %x num rates %d need 4 way %d",
1776 cmd->rx_max_rate, cmd->rx_mcs_set, cmd->tx_max_rate,
1777 cmd->tx_mcs_set, peer_ht_rates.num_rates,
1778 cmd->need_ptk_4_way);
1779
1780 /*
1781 * Limit NSS to the maximum number of RF chains supported by the
1782 * target; otherwise firmware will crash.
1783 */
1784 if (cmd->peer_nss > WMA_MAX_NSS) {
1785 wma_err("peer Nss %d is more than supported", cmd->peer_nss);
1786 cmd->peer_nss = WMA_MAX_NSS;
1787 }
1788
1789 wma_populate_peer_he_cap(cmd, params);
1790 wma_populate_peer_eht_cap(cmd, params);
1791 wma_populate_peer_puncture(cmd, des_chan);
1792 wma_populate_peer_mlo_cap(cmd, params);
1793 if (!wma_is_vdev_in_ap_mode(wma, params->smesessionId))
1794 intr->nss = cmd->peer_nss;
1795 wma_objmgr_set_peer_mlme_nss(wma, cmd->peer_mac, cmd->peer_nss);
1796
1797 /* Until the conversion is done in WMI, fill the firmware phymode here */
1798 cmd->peer_phymode = wmi_host_to_fw_phymode(phymode);
1799
1800 keymgmt = wlan_crypto_get_param(intr->vdev, WLAN_CRYPTO_PARAM_KEY_MGMT);
1801 authmode = wlan_crypto_get_param(intr->vdev,
1802 WLAN_CRYPTO_PARAM_AUTH_MODE);
1803 uccipher = wlan_crypto_get_param(intr->vdev,
1804 WLAN_CRYPTO_PARAM_UCAST_CIPHER);
1805
1806 cmd->akm = cm_crypto_authmode_to_wmi_authmode(authmode,
1807 keymgmt,
1808 uccipher);
1809
1810 status = wmi_unified_peer_assoc_send(wma->wmi_handle,
1811 cmd);
1812 if (QDF_IS_STATUS_ERROR(status))
1813 wma_alert("Failed to send peer assoc command status = %d",
1814 status);
1815 qdf_mem_free(cmd);
1816
1817 return status;
1818 }
1819
1820 /**
1821 * wmi_unified_vdev_set_gtx_cfg_send() - set GTX params
1822 * @wmi_handle: wmi handle
1823 * @if_id: vdev id
1824 * @gtx_info: GTX config params
1825 *
1826 * This function sets GTX-related params in firmware.
1827 *
1828 * Return: QDF_STATUS_SUCCESS for success or error code
1829 */
wmi_unified_vdev_set_gtx_cfg_send(wmi_unified_t wmi_handle,uint32_t if_id,gtx_config_t * gtx_info)1830 QDF_STATUS wmi_unified_vdev_set_gtx_cfg_send(wmi_unified_t wmi_handle,
1831 uint32_t if_id,
1832 gtx_config_t *gtx_info)
1833 {
1834 struct wmi_gtx_config params;
1835
1836 params.gtx_rt_mask[0] = gtx_info->gtxRTMask[0];
1837 params.gtx_rt_mask[1] = gtx_info->gtxRTMask[1];
1838 params.gtx_usrcfg = gtx_info->gtxUsrcfg;
1839 params.gtx_threshold = gtx_info->gtxPERThreshold;
1840 params.gtx_margin = gtx_info->gtxPERMargin;
1841 params.gtx_tpcstep = gtx_info->gtxTPCstep;
1842 params.gtx_tpcmin = gtx_info->gtxTPCMin;
1843 params.gtx_bwmask = gtx_info->gtxBWMask;
1844
1845 return wmi_unified_vdev_set_gtx_cfg_cmd(wmi_handle,
1846 if_id, &params);
1847
1848 }
1849
1850 /**
1851 * wma_update_protection_mode() - update protection mode
1852 * @wma: wma handle
1853 * @vdev_id: vdev id
1854 * @llbcoexist: protection mode info
1855 *
1856 * This function sets the protection mode (RTS/CTS) in fw for the given vdev id.
1857 *
1858 * Return: none
1859 */
wma_update_protection_mode(tp_wma_handle wma,uint8_t vdev_id,uint8_t llbcoexist)1860 void wma_update_protection_mode(tp_wma_handle wma, uint8_t vdev_id,
1861 uint8_t llbcoexist)
1862 {
1863 QDF_STATUS ret;
1864 enum ieee80211_protmode prot_mode;
1865
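/*
 * When legacy 11b stations are present (llbcoexist set), use
 * CTS-to-self protection; otherwise disable protection entirely.
 */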
1866 prot_mode = llbcoexist ? IEEE80211_PROT_CTSONLY : IEEE80211_PROT_NONE;
1867
1868 ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
1869 wmi_vdev_param_protection_mode,
1870 prot_mode);
1871
1872 if (QDF_IS_STATUS_ERROR(ret))
1873 wma_err("Failed to send wmi protection mode cmd");
1874 else
1875 wma_nofl_debug("Updated protection mode %d to target",
1876 prot_mode);
1877 }
1878
1879 void
wma_update_beacon_interval(tp_wma_handle wma,uint8_t vdev_id,uint16_t beaconInterval)1880 wma_update_beacon_interval(tp_wma_handle wma, uint8_t vdev_id,
1881 uint16_t beaconInterval)
1882 {
1883 QDF_STATUS ret;
1884
1885 ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
1886 wmi_vdev_param_beacon_interval,
1887 beaconInterval);
1888
1889 if (QDF_IS_STATUS_ERROR(ret))
1890 wma_err("Failed to update beacon interval");
1891 else
1892 wma_info("Updated beacon interval %d for vdev %d",
1893 beaconInterval, vdev_id);
1894 }
1895
1896 #ifdef WLAN_FEATURE_11AX_BSS_COLOR
1897 /**
1898 * wma_update_bss_color() - update beacon bss color in fw
1899 * @wma: wma handle
1900 * @vdev_id: vdev id
1901 * @bcn_params: beacon params; only the bss_color and bss_color_disabled
1902 * fields are used to build the HE operation value sent to firmware.
1903 *
1904 * Return: none
1905 */
1906 static void
wma_update_bss_color(tp_wma_handle wma,uint8_t vdev_id,tUpdateBeaconParams * bcn_params)1907 wma_update_bss_color(tp_wma_handle wma, uint8_t vdev_id,
1908 tUpdateBeaconParams *bcn_params)
1909 {
1910 QDF_STATUS ret;
1911 uint32_t dword_he_ops = 0;
1912
1913 WMI_HEOPS_COLOR_SET(dword_he_ops, bcn_params->bss_color);
1914 WMI_HEOPS_BSSCOLORDISABLE_SET(dword_he_ops,
1915 bcn_params->bss_color_disabled);
1916 wma_nofl_debug("vdev: %d, update bss color, HE_OPS: 0x%x",
1917 vdev_id, dword_he_ops);
1918 ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
1919 wmi_vdev_param_he_bss_color, dword_he_ops);
1920 if (QDF_IS_STATUS_ERROR(ret))
1921 wma_err("Failed to update HE operations");
1922 }
1923 #else
wma_update_bss_color(tp_wma_handle wma,uint8_t vdev_id,tUpdateBeaconParams * bcn_params)1924 static void wma_update_bss_color(tp_wma_handle wma, uint8_t vdev_id,
1925 tUpdateBeaconParams *bcn_params)
1926 {
1927 }
1928 #endif
1929
1930 /**
1931 * wma_process_update_beacon_params() - update beacon parameters to target
1932 * @wma: wma handle
1933 * @bcn_params: beacon parameters
1934 *
1935 * Return: none
1936 */
1937 void
wma_process_update_beacon_params(tp_wma_handle wma,tUpdateBeaconParams * bcn_params)1938 wma_process_update_beacon_params(tp_wma_handle wma,
1939 tUpdateBeaconParams *bcn_params)
1940 {
1941 if (!bcn_params) {
1942 wma_err("bcn_params NULL");
1943 return;
1944 }
1945
1946 if (bcn_params->vdev_id >= wma->max_bssid) {
1947 wma_err("Invalid vdev id %d", bcn_params->vdev_id);
1948 return;
1949 }
1950
1951 if (bcn_params->paramChangeBitmap & PARAM_BCN_INTERVAL_CHANGED) {
1952 wma_update_beacon_interval(wma, bcn_params->vdev_id,
1953 bcn_params->beaconInterval);
1954 }
1955
1956 if (bcn_params->paramChangeBitmap & PARAM_llBCOEXIST_CHANGED)
1957 wma_update_protection_mode(wma, bcn_params->vdev_id,
1958 bcn_params->llbCoexist);
1959
1960 if (bcn_params->paramChangeBitmap & PARAM_BSS_COLOR_CHANGED)
1961 wma_update_bss_color(wma, bcn_params->vdev_id,
1962 bcn_params);
1963 }
1964
wma_update_rts_params(tp_wma_handle wma,uint32_t value)1965 void wma_update_rts_params(tp_wma_handle wma, uint32_t value)
1966 {
1967 uint8_t vdev_id;
1968 QDF_STATUS ret;
1969 struct wlan_objmgr_vdev *vdev;
1970
1971 for (vdev_id = 0; vdev_id < wma->max_bssid; vdev_id++) {
1972 vdev = wma->interfaces[vdev_id].vdev;
1973 if (!vdev)
1974 continue;
1975 ret = wma_vdev_set_param(wma->wmi_handle,
1976 vdev_id,
1977 wmi_vdev_param_rts_threshold,
1978 value);
1979 if (QDF_IS_STATUS_ERROR(ret))
1980 wma_err("Update cfg param fail for vdevId %d", vdev_id);
1981 }
1982 }
1983
wma_update_frag_params(tp_wma_handle wma,uint32_t value)1984 void wma_update_frag_params(tp_wma_handle wma, uint32_t value)
1985 {
1986 uint8_t vdev_id;
1987 QDF_STATUS ret;
1988 struct wlan_objmgr_vdev *vdev;
1989
1990 for (vdev_id = 0; vdev_id < wma->max_bssid; vdev_id++) {
1991 vdev = wma->interfaces[vdev_id].vdev;
1992 if (!vdev)
1993 continue;
1994 ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
1995 wmi_vdev_param_fragmentation_threshold,
1996 value);
1997 if (QDF_IS_STATUS_ERROR(ret))
1998 wma_err("Update cfg params failed for vdevId %d",
1999 vdev_id);
2000 }
2001 }
2002
2003 /**
2004 * wma_process_update_edca_param_req() - update EDCA params
2005 * @handle: wma handle
2006 * @edca_params: edca parameters
2007 *
2008 * This function updates EDCA parameters to the target
2009 *
2010 * Return: QDF Status
2011 */
wma_process_update_edca_param_req(WMA_HANDLE handle,tEdcaParams * edca_params)2012 QDF_STATUS wma_process_update_edca_param_req(WMA_HANDLE handle,
2013 tEdcaParams *edca_params)
2014 {
2015 tp_wma_handle wma_handle = (tp_wma_handle) handle;
2016 struct wmi_host_wme_vparams wmm_param[QCA_WLAN_AC_ALL];
2017 tSirMacEdcaParamRecord *edca_record;
2018 int ac;
2019 struct ol_tx_wmm_param_t ol_tx_wmm_param;
2020 uint8_t vdev_id;
2021 QDF_STATUS status;
2022 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
2023 uint8_t *debug_str;
2024 uint32_t len = 0;
2025
2026 vdev_id = edca_params->vdev_id;
2027 if (!wma_is_vdev_valid(vdev_id)) {
2028 wma_err("vdev id:%d is not active ", vdev_id);
2029 goto fail;
2030 }
2031
2032 debug_str = qdf_mem_malloc(WMA_WMM_DEBUG_STRING_SIZE);
2033 if (!debug_str)
2034 goto fail;
2035
2036 for (ac = 0; ac < QCA_WLAN_AC_ALL; ac++) {
2037 switch (ac) {
2038 case QCA_WLAN_AC_BE:
2039 edca_record = &edca_params->acbe;
2040 break;
2041 case QCA_WLAN_AC_BK:
2042 edca_record = &edca_params->acbk;
2043 break;
2044 case QCA_WLAN_AC_VI:
2045 edca_record = &edca_params->acvi;
2046 break;
2047 case QCA_WLAN_AC_VO:
2048 edca_record = &edca_params->acvo;
2049 break;
2050 default:
2051 qdf_mem_free(debug_str);
2052 goto fail;
2053 }
2054
2055 wma_update_edca_params_for_ac(edca_record, &wmm_param[ac], ac,
2056 edca_params->mu_edca_params,
2057 debug_str,
2058 WMA_WMM_DEBUG_STRING_SIZE, &len);
2059
2060 ol_tx_wmm_param.ac[ac].aifs = wmm_param[ac].aifs;
2061 ol_tx_wmm_param.ac[ac].cwmin = wmm_param[ac].cwmin;
2062 ol_tx_wmm_param.ac[ac].cwmax = wmm_param[ac].cwmax;
2063 }
2064
2065 wma_nofl_debug("WMM params: %s", debug_str);
2066 qdf_mem_free(debug_str);
2067
2068 status = wmi_unified_process_update_edca_param(wma_handle->wmi_handle,
2069 vdev_id,
2070 edca_params->mu_edca_params,
2071 wmm_param);
2072 if (status == QDF_STATUS_E_NOMEM)
2073 return status;
2074 else if (status == QDF_STATUS_E_FAILURE)
2075 goto fail;
2076
2077 cdp_set_wmm_param(soc, WMI_PDEV_ID_SOC, ol_tx_wmm_param);
2078
2079 return QDF_STATUS_SUCCESS;
2080
2081 fail:
2082 wma_err("Failed to set WMM Parameters");
2083 return QDF_STATUS_E_FAILURE;
2084 }
2085
2086 /**
2087 * wmi_unified_probe_rsp_tmpl_send() - send probe response template to fw
2088 * @wma: wma handle
2089 * @vdev_id: vdev id
2090 * @probe_rsp_info: probe response info
2091 *
2092 * Return: 0 for success or error code
2093 */
wmi_unified_probe_rsp_tmpl_send(tp_wma_handle wma,uint8_t vdev_id,tpSendProbeRespParams probe_rsp_info)2094 static int wmi_unified_probe_rsp_tmpl_send(tp_wma_handle wma,
2095 uint8_t vdev_id,
2096 tpSendProbeRespParams probe_rsp_info)
2097 {
2098 uint64_t adjusted_tsf_le;
2099 struct ieee80211_frame *wh;
2100 struct wmi_probe_resp_params params;
2101
2102 /*
2103 * Make the TSF offset negative so probe responses in the same
2104 * staggered batch have the same TSF.
2105 */
2106 adjusted_tsf_le = cpu_to_le64(0ULL -
2107 wma->interfaces[vdev_id].tsfadjust);
2108 /* Update the timestamp in the probe response buffer with adjusted TSF */
2109 wh = (struct ieee80211_frame *)probe_rsp_info->probeRespTemplate;
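/*
 * &wh[1] is the first byte after the 802.11 MAC header, i.e. the
 * 8-byte Timestamp field at the start of the probe response body.
 */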
2110 A_MEMCPY(&wh[1], &adjusted_tsf_le, sizeof(adjusted_tsf_le));
2111
2112 params.prb_rsp_template_len = probe_rsp_info->probeRespTemplateLen;
2113 params.prb_rsp_template_frm = probe_rsp_info->probeRespTemplate;
2114 params.go_ignore_non_p2p_probe_req =
2115 probe_rsp_info->go_ignore_non_p2p_probe_req;
2116
2117 return wmi_unified_probe_rsp_tmpl_send_cmd(wma->wmi_handle, vdev_id,
2118 &params);
2119 }
2120
2121 #ifdef WLAN_FEATURE_11BE_MLO
2122 /**
2123 * wma_upt_mlo_partner_info() - update mlo info in beacon template
2124 * @params: beacon template params
2125 * @bcn_param: beacon params
2126 * @bytes_to_strip: bytes to strip
2127 *
2128 * Return: Void
2129 */
wma_upt_mlo_partner_info(struct beacon_tmpl_params * params,const tpSendbeaconParams bcn_param,uint8_t bytes_to_strip)2130 static void wma_upt_mlo_partner_info(struct beacon_tmpl_params *params,
2131 const tpSendbeaconParams bcn_param,
2132 uint8_t bytes_to_strip)
2133 {
2134 struct ml_bcn_partner_info *bcn_info;
2135 struct ml_bcn_partner_info *info;
2136 int link;
2137
2138 params->mlo_partner.num_links = bcn_param->mlo_partner.num_links;
2139 for (link = 0; link < params->mlo_partner.num_links; link++) {
2140 bcn_info = &bcn_param->mlo_partner.partner_info[link];
2141 info = &params->mlo_partner.partner_info[link];
2142 info->vdev_id = bcn_info->vdev_id;
2143 info->beacon_interval = bcn_info->beacon_interval;
2144 if (bcn_info->csa_switch_count_offset &&
2145 bcn_info->csa_switch_count_offset > bytes_to_strip)
2146 info->csa_switch_count_offset =
2147 bcn_info->csa_switch_count_offset -
2148 bytes_to_strip;
2149 if (bcn_info->ext_csa_switch_count_offset &&
2150 bcn_info->ext_csa_switch_count_offset > bytes_to_strip)
2151 info->ext_csa_switch_count_offset =
2152 bcn_info->ext_csa_switch_count_offset -
2153 bytes_to_strip;
2154 }
2155 }
2156 #else
wma_upt_mlo_partner_info(struct beacon_tmpl_params * params,const tpSendbeaconParams bcn_param,uint8_t bytes_to_strip)2157 static void wma_upt_mlo_partner_info(struct beacon_tmpl_params *params,
2158 const tpSendbeaconParams bcn_param,
2159 uint8_t bytes_to_strip)
2160 {
2161 }
2162 #endif
2163
2164 /**
2165 * wma_unified_bcn_tmpl_send() - send beacon template to fw
2166 * @wma: wma handle
2167 * @vdev_id: vdev id
2168 * @bcn_info: beacon info
2169 * @bytes_to_strip: bytes to strip
2170 *
2171 * Return: QDF_STATUS_SUCCESS for success or error code
2172 */
wma_unified_bcn_tmpl_send(tp_wma_handle wma,uint8_t vdev_id,const tpSendbeaconParams bcn_info,uint8_t bytes_to_strip)2173 static QDF_STATUS wma_unified_bcn_tmpl_send(tp_wma_handle wma,
2174 uint8_t vdev_id,
2175 const tpSendbeaconParams bcn_info,
2176 uint8_t bytes_to_strip)
2177 {
2178 struct beacon_tmpl_params params = {0};
2179 uint32_t tmpl_len, tmpl_len_aligned;
2180 uint8_t *frm;
2181 QDF_STATUS ret;
2182 uint8_t *p2p_ie;
2183 uint16_t p2p_ie_len = 0;
2184 uint64_t adjusted_tsf_le;
2185 struct ieee80211_frame *wh;
2186
2187 if (!wma_is_vdev_valid(vdev_id)) {
2188 wma_err("vdev id:%d is not active ", vdev_id);
2189 return QDF_STATUS_E_INVAL;
2190 }
2191
2192 wma_nofl_debug("vdev %d: bcn update reason %d", vdev_id,
2193 bcn_info->reason);
2194
2195 if (bcn_info->p2pIeOffset) {
2196 p2p_ie = bcn_info->beacon + bcn_info->p2pIeOffset;
2197 p2p_ie_len = (uint16_t) p2p_ie[1] + 2;
2198 }
2199
2200 /*
2201 * XXX: The first word of the beacon buffer contains the beacon
2202 * length only when UMAC is sending the beacon template. In other
2203 * cases (e.g. from a TBTT update) the beacon length is read from
2204 * the beacon information.
2205 */
2206 if (bytes_to_strip)
2207 tmpl_len = *(uint32_t *) &bcn_info->beacon[0];
2208 else
2209 tmpl_len = bcn_info->beaconLength;
2210
2211 if (tmpl_len > WMI_BEACON_TX_BUFFER_SIZE) {
2212 wma_err("tmpl_len: %d > %d. Invalid tmpl len", tmpl_len,
2213 WMI_BEACON_TX_BUFFER_SIZE);
2214 return QDF_STATUS_E_INVAL;
2215 }
2216
2217 if (p2p_ie_len) {
2218 if (tmpl_len <= p2p_ie_len) {
2219 wma_err("tmpl_len %d <= p2p_ie_len %d, Invalid",
2220 tmpl_len, p2p_ie_len);
2221 return QDF_STATUS_E_INVAL;
2222 }
2223 tmpl_len -= (uint32_t) p2p_ie_len;
2224 }
2225
2226 frm = bcn_info->beacon + bytes_to_strip;
2227 tmpl_len_aligned = roundup(tmpl_len, sizeof(A_UINT32));
2228 /*
2229 * Make the TSF offset negative so beacons in the same
2230 * staggered batch have the same TSF.
2231 */
2232 adjusted_tsf_le = cpu_to_le64(0ULL -
2233 wma->interfaces[vdev_id].tsfadjust);
2234 /* Update the timestamp in the beacon buffer with adjusted TSF */
2235 wh = (struct ieee80211_frame *)frm;
2236 A_MEMCPY(&wh[1], &adjusted_tsf_le, sizeof(adjusted_tsf_le));
2237
2238
2239
2240 params.vdev_id = vdev_id;
2241 params.tim_ie_offset = bcn_info->timIeOffset - bytes_to_strip;
2242 params.tmpl_len = tmpl_len;
2243 params.frm = frm;
2244 params.tmpl_len_aligned = tmpl_len_aligned;
2245 params.enable_bigtk =
2246 mlme_get_bigtk_support(wma->interfaces[vdev_id].vdev);
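/*
 * The CSA/eCSA count offsets supplied by UMAC are relative to the
 * start of the original beacon buffer, so subtract the stripped
 * bytes to make them relative to the template actually sent.
 */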
2247 if (bcn_info->csa_count_offset &&
2248 (bcn_info->csa_count_offset > bytes_to_strip))
2249 params.csa_switch_count_offset =
2250 bcn_info->csa_count_offset - bytes_to_strip;
2251 if (bcn_info->ecsa_count_offset &&
2252 (bcn_info->ecsa_count_offset > bytes_to_strip))
2253 params.ext_csa_switch_count_offset =
2254 bcn_info->ecsa_count_offset - bytes_to_strip;
2255
2256 wma_upt_mlo_partner_info(&params, bcn_info, bytes_to_strip);
2257
2258 ret = wmi_unified_beacon_tmpl_send_cmd(wma->wmi_handle,
2259 &params);
2260 if (QDF_IS_STATUS_ERROR(ret))
2261 wma_err("Failed to send bcn tmpl: %d", ret);
2262
2263 return ret;
2264 }
2265
2266 /**
2267 * wma_store_bcn_tmpl() - store beacon template
2268 * @wma: wma handle
2269 * @vdev_id: vdev id
2270 * @bcn_info: beacon params
2271 *
2272 * This function stores the beacon template locally.
2273 * It will be sent to the target on reception of the
2274 * SWBA event.
2275 *
2276 * Return: QDF status
2277 */
wma_store_bcn_tmpl(tp_wma_handle wma,uint8_t vdev_id,tpSendbeaconParams bcn_info)2278 static QDF_STATUS wma_store_bcn_tmpl(tp_wma_handle wma, uint8_t vdev_id,
2279 tpSendbeaconParams bcn_info)
2280 {
2281 struct beacon_info *bcn;
2282 uint32_t len;
2283 uint8_t *bcn_payload;
2284 struct beacon_tim_ie *tim_ie;
2285
2286 bcn = wma->interfaces[vdev_id].beacon;
2287 if (!bcn || !bcn->buf) {
2288 wma_err("Memory is not allocated to hold bcn template");
2289 return QDF_STATUS_E_INVAL;
2290 }
2291
2292 len = *(uint32_t *) &bcn_info->beacon[0];
2293 if (len > SIR_MAX_BEACON_SIZE - sizeof(uint32_t)) {
2294 wma_err("Received beacon len %u exceeding max limit %lu",
2295 len, (unsigned long)(
2296 SIR_MAX_BEACON_SIZE - sizeof(uint32_t)));
2297 return QDF_STATUS_E_INVAL;
2298 }
2299 qdf_spin_lock_bh(&bcn->lock);
2300
2301 /*
2302 * Copy the received beacon template content into the local buffer.
2303 * It will be sent to the target on reception of the SWBA event
2304 * from the target.
2305 */
2306 qdf_nbuf_trim_tail(bcn->buf, qdf_nbuf_len(bcn->buf));
2307 memcpy(qdf_nbuf_data(bcn->buf),
2308 bcn_info->beacon + 4 /* Exclude beacon length field */,
2309 len);
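/*
 * The offsets from UMAC include the 4-byte length field that is not
 * copied into the local buffer, so shift each valid offset back by 4.
 */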
2310 if (bcn_info->timIeOffset > 3)
2311 bcn->tim_ie_offset = bcn_info->timIeOffset - 4;
2312 else
2313 bcn->tim_ie_offset = bcn_info->timIeOffset;
2314
2315 if (bcn_info->p2pIeOffset > 3)
2316 bcn->p2p_ie_offset = bcn_info->p2pIeOffset - 4;
2317 else
2318 bcn->p2p_ie_offset = bcn_info->p2pIeOffset;
2319
2320 if (bcn_info->csa_count_offset > 3)
2321 bcn->csa_count_offset = bcn_info->csa_count_offset - 4;
2322 else
2323 bcn->csa_count_offset = bcn_info->csa_count_offset;
2324
2325 if (bcn_info->ecsa_count_offset > 3)
2326 bcn->ecsa_count_offset = bcn_info->ecsa_count_offset - 4;
2327 else
2328 bcn->ecsa_count_offset = bcn_info->ecsa_count_offset;
2329
2330 bcn_payload = qdf_nbuf_data(bcn->buf);
2331 if (bcn->tim_ie_offset) {
2332 tim_ie = (struct beacon_tim_ie *)
2333 (&bcn_payload[bcn->tim_ie_offset]);
2334 /*
2335 * Initial Value of bcn->dtim_count will be 0.
2336 * But if the beacon gets updated then current dtim
2337 * count will be restored
2338 */
2339 tim_ie->dtim_count = bcn->dtim_count;
2340 tim_ie->tim_bitctl = 0;
2341 }
2342
2343 qdf_nbuf_put_tail(bcn->buf, len);
2344 bcn->len = len;
2345
2346 qdf_spin_unlock_bh(&bcn->lock);
2347
2348 return QDF_STATUS_SUCCESS;
2349 }
2350
wma_tbttoffset_update_event_handler(void * handle,uint8_t * event,uint32_t len)2351 int wma_tbttoffset_update_event_handler(void *handle, uint8_t *event,
2352 uint32_t len)
2353 {
2354 tp_wma_handle wma = (tp_wma_handle) handle;
2355 WMI_TBTTOFFSET_UPDATE_EVENTID_param_tlvs *param_buf;
2356 wmi_tbtt_offset_event_fixed_param *tbtt_offset_event;
2357 struct wma_txrx_node *intf;
2358 struct beacon_info *bcn;
2359 tSendbeaconParams bcn_info;
2360 uint32_t *adjusted_tsf = NULL;
2361 uint32_t if_id = 0, vdev_map;
2362
2363 if (wma_validate_handle(wma))
2364 return -EINVAL;
2365
2366 param_buf = (WMI_TBTTOFFSET_UPDATE_EVENTID_param_tlvs *) event;
2367 if (!param_buf) {
2368 wma_err("Invalid tbtt update event buffer");
2369 return -EINVAL;
2370 }
2371
2372 tbtt_offset_event = param_buf->fixed_param;
2373 intf = wma->interfaces;
2374 vdev_map = tbtt_offset_event->vdev_map;
2375 adjusted_tsf = param_buf->tbttoffset_list;
2376 if (!adjusted_tsf) {
2377 wma_err("Invalid adjusted_tsf");
2378 return -EINVAL;
2379 }
2380
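/*
 * vdev_map is a bitmap of vdevs with updated TBTT offsets: bit i set
 * means adjusted_tsf[i] holds the new offset for vdev i.
 */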
2381 for (; (if_id < wma->max_bssid && vdev_map); vdev_map >>= 1, if_id++) {
2382 if (!intf[if_id].vdev)
2383 continue;
2384
2385 if (!(vdev_map & 0x1))
2386 continue;
2387
2388 bcn = intf[if_id].beacon;
2389 if (!bcn) {
2390 wma_err("Invalid beacon");
2391 return -EINVAL;
2392 }
2393 if (!bcn->buf) {
2394 wma_err("Invalid beacon buffer");
2395 return -EINVAL;
2396 }
2397 /* Save the adjusted TSF */
2398 intf[if_id].tsfadjust = adjusted_tsf[if_id];
2399
2400 qdf_spin_lock_bh(&bcn->lock);
2401 qdf_mem_zero(&bcn_info, sizeof(bcn_info));
2402 qdf_mem_copy(bcn_info.beacon,
2403 qdf_nbuf_data(bcn->buf), bcn->len);
2404 bcn_info.p2pIeOffset = bcn->p2p_ie_offset;
2405 bcn_info.beaconLength = bcn->len;
2406 bcn_info.timIeOffset = bcn->tim_ie_offset;
2407 bcn_info.csa_count_offset = bcn->csa_count_offset;
2408 bcn_info.ecsa_count_offset = bcn->ecsa_count_offset;
2409 qdf_spin_unlock_bh(&bcn->lock);
2410
2411 wma_err_rl("Update beacon template for vdev %d due to TBTT offset update",
2412 if_id);
2413 /* Update beacon template in firmware */
2414 wma_unified_bcn_tmpl_send(wma, if_id, &bcn_info, 0);
2415 }
2416 return 0;
2417 }
2418
2419 /**
2420 * wma_p2p_go_set_beacon_ie() - set beacon IE for p2p go
2421 * @wma_handle: wma handle
2422 * @vdev_id: vdev id
2423 * @p2pIe: p2p IE
2424 *
2425 * Return: 0 for success or error code
2426 */
wma_p2p_go_set_beacon_ie(t_wma_handle * wma_handle,A_UINT32 vdev_id,uint8_t * p2pIe)2427 static int wma_p2p_go_set_beacon_ie(t_wma_handle *wma_handle,
2428 A_UINT32 vdev_id, uint8_t *p2pIe)
2429 {
2430 if (wma_validate_handle(wma_handle))
2431 return QDF_STATUS_E_FAILURE;
2432
2433 return wmi_unified_p2p_go_set_beacon_ie_cmd(wma_handle->wmi_handle,
2434 vdev_id, p2pIe);
2435 }
2436
2437 /**
2438 * wma_send_probe_rsp_tmpl() - send probe resp template
2439 * @wma: wma handle
2440 * @probe_rsp_info: probe response info
2441 *
2442 * This function sends the probe response template to firmware,
2443 * which uses it when probe response offload is enabled.
2444 *
2445 * Return: none
2446 */
wma_send_probe_rsp_tmpl(tp_wma_handle wma,tpSendProbeRespParams probe_rsp_info)2447 void wma_send_probe_rsp_tmpl(tp_wma_handle wma,
2448 tpSendProbeRespParams probe_rsp_info)
2449 {
2450 uint8_t vdev_id;
2451 struct sAniProbeRspStruct *probe_rsp;
2452
2453 if (!probe_rsp_info) {
2454 wma_err("probe_rsp_info is NULL");
2455 return;
2456 }
2457
2458 probe_rsp = (struct sAniProbeRspStruct *)
2459 (probe_rsp_info->probeRespTemplate);
2460 if (!probe_rsp) {
2461 wma_err("probe_rsp is NULL");
2462 return;
2463 }
2464
2465 if (wma_find_vdev_id_by_addr(wma, probe_rsp->macHdr.sa, &vdev_id)) {
2466 wma_err("failed to get vdev id");
2467 return;
2468 }
2469
2470 if (wmi_service_enabled(wma->wmi_handle,
2471 wmi_service_beacon_offload)) {
2472 if (wmi_unified_probe_rsp_tmpl_send(wma, vdev_id,
2473 probe_rsp_info) < 0) {
2474 wma_err("wmi_unified_probe_rsp_tmpl_send Failed");
2475 return;
2476 }
2477 }
2478 }
2479
wma_set_ap_vdev_up(tp_wma_handle wma,uint8_t vdev_id)2480 QDF_STATUS wma_set_ap_vdev_up(tp_wma_handle wma, uint8_t vdev_id)
2481 {
2482 QDF_STATUS status = QDF_STATUS_SUCCESS;
2483 struct vdev_mlme_obj *mlme_obj;
2484 struct wlan_objmgr_vdev *vdev;
2485 struct wma_txrx_node *iface;
2486
2487 iface = &wma->interfaces[vdev_id];
2488 vdev = iface->vdev;
2489 mlme_obj = wlan_vdev_mlme_get_cmpt_obj(vdev);
2490 if (!mlme_obj) {
2491 wma_err("failed to get mlme_obj");
2492 return QDF_STATUS_E_INVAL;
2493 }
2494 mlme_obj->proto.sta.assoc_id = 0;
2495
2496 status = vdev_mgr_up_send(mlme_obj);
2497 if (QDF_IS_STATUS_ERROR(status)) {
2498 wma_err("failed to send vdev up");
2499 return status;
2500 }
2501 wma_set_sap_keepalive(wma, vdev_id);
2502 wma_set_vdev_mgmt_rate(wma, vdev_id);
2503 wma_vdev_set_he_bss_params(wma, vdev_id, &mlme_obj->proto.he_ops_info);
2504 mlme_sr_update(vdev, true);
2505
2506 return status;
2507 }
2508
2509 /**
2510 * wma_send_beacon() - send beacon template
2511 * @wma: wma handle
2512 * @bcn_info: beacon info
2513 *
2514 * This function stores the beacon template locally and
2515 * updates keep-alive parameters.
2516 *
2517 * Return: none
2518 */
wma_send_beacon(tp_wma_handle wma,tpSendbeaconParams bcn_info)2519 void wma_send_beacon(tp_wma_handle wma, tpSendbeaconParams bcn_info)
2520 {
2521 uint8_t vdev_id;
2522 QDF_STATUS status;
2523 uint8_t *p2p_ie;
2524 struct sAniBeaconStruct *beacon;
2525
2526 beacon = (struct sAniBeaconStruct *) (bcn_info->beacon);
2527 if (wma_find_vdev_id_by_addr(wma, beacon->macHdr.sa, &vdev_id)) {
2528 wma_err("failed to get vdev id");
2529 status = QDF_STATUS_E_INVAL;
2530 goto send_rsp;
2531 }
2532
2533 if (wmi_service_enabled(wma->wmi_handle,
2534 wmi_service_beacon_offload)) {
2535 status = wma_unified_bcn_tmpl_send(wma, vdev_id, bcn_info, 4);
2536 if (QDF_IS_STATUS_ERROR(status)) {
2537 wma_err("wmi_unified_bcn_tmpl_send Failed");
2538 goto send_rsp;
2539 }
2540
2541 if (bcn_info->p2pIeOffset) {
2542 p2p_ie = bcn_info->beacon + bcn_info->p2pIeOffset;
2543 wma_debug("p2pIe is present - vdev_id %hu, p2p_ie = %pK, p2p ie len = %hu",
2544 vdev_id, p2p_ie, p2p_ie[1]);
2545 if (wma_p2p_go_set_beacon_ie(wma, vdev_id,
2546 p2p_ie) < 0) {
2547 wma_err("wmi_unified_bcn_tmpl_send Failed");
2548 status = QDF_STATUS_E_INVAL;
2549 goto send_rsp;
2550 }
2551 }
2552 }
2553 status = wma_store_bcn_tmpl(wma, vdev_id, bcn_info);
2554 if (status != QDF_STATUS_SUCCESS) {
2555 wma_err("wma_store_bcn_tmpl Failed");
2556 goto send_rsp;
2557 }
2558
2559 send_rsp:
2560 bcn_info->status = status;
2561 wma_send_msg(wma, WMA_SEND_BCN_RSP, (void *)bcn_info, 0);
2562 }
2563
2564 /**
2565 * wma_set_keepalive_req() - send keep alive request to fw
2566 * @wma: wma handle
2567 * @keepalive: keep alive parameters
2568 *
2569 * Return: none
2570 */
wma_set_keepalive_req(tp_wma_handle wma,struct keep_alive_req * keepalive)2571 void wma_set_keepalive_req(tp_wma_handle wma,
2572 struct keep_alive_req *keepalive)
2573 {
2574 wma_nofl_debug("KEEPALIVE:PacketType:%d", keepalive->packetType);
2575 wma_set_sta_keep_alive(wma, keepalive->sessionId,
2576 keepalive->packetType,
2577 keepalive->timePeriod,
2578 keepalive->hostIpv4Addr,
2579 keepalive->destIpv4Addr,
2580 keepalive->dest_macaddr.bytes);
2581
2582 qdf_mem_free(keepalive);
2583 }
2584
2585 /**
2586 * wma_beacon_miss_handler() - beacon miss event handler
2587 * @wma: wma handle
2588 * @vdev_id: vdev id
2589 * @rssi: rssi value
2590 *
2591 * This function sends a beacon miss indication to upper layers.
2592 *
2593 * Return: none
2594 */
wma_beacon_miss_handler(tp_wma_handle wma,uint32_t vdev_id,int32_t rssi)2595 void wma_beacon_miss_handler(tp_wma_handle wma, uint32_t vdev_id, int32_t rssi)
2596 {
2597 struct missed_beacon_ind *beacon_miss_ind;
2598 struct mac_context *mac = cds_get_context(QDF_MODULE_ID_PE);
2599
2600 beacon_miss_ind = qdf_mem_malloc(sizeof(*beacon_miss_ind));
2601 if (!beacon_miss_ind)
2602 return;
2603
2604 if (mac && mac->sme.tx_queue_cb)
2605 mac->sme.tx_queue_cb(mac->hdd_handle, vdev_id,
2606 WLAN_STOP_ALL_NETIF_QUEUE,
2607 WLAN_CONTROL_PATH);
2608 beacon_miss_ind->messageType = WMA_MISSED_BEACON_IND;
2609 beacon_miss_ind->length = sizeof(*beacon_miss_ind);
2610 beacon_miss_ind->bss_idx = vdev_id;
2611 beacon_miss_ind->rssi = rssi;
2612
2613 wma_send_msg(wma, WMA_MISSED_BEACON_IND, beacon_miss_ind, 0);
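/*
 * Without hardware dB-to-dBm support the reported RSSI is relative
 * to the noise floor; convert it to dBm before passing it to the
 * lost-link handler.
 */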
2614 if (!wmi_service_enabled(wma->wmi_handle,
2615 wmi_service_hw_db2dbm_support))
2616 rssi += WMA_TGT_NOISE_FLOOR_DBM;
2617 wma_lost_link_info_handler(wma, vdev_id, rssi);
2618 }
2619
wlan_cm_send_beacon_miss(uint8_t vdev_id,int32_t rssi)2620 void wlan_cm_send_beacon_miss(uint8_t vdev_id, int32_t rssi)
2621 {
2622 tp_wma_handle wma;
2623
2624 wma = cds_get_context(QDF_MODULE_ID_WMA);
2625 if (!wma)
2626 return;
2627
2628 wma_beacon_miss_handler(wma, vdev_id, rssi);
2629 }
2630
2631 /**
2632 * wma_get_status_str() - get string of tx status from firmware
2633 * @status: tx status
2634 *
2635 * Return: converted string of tx status
2636 */
wma_get_status_str(uint32_t status)2637 static const char *wma_get_status_str(uint32_t status)
2638 {
2639 switch (status) {
2640 default:
2641 return "unknown";
2642 CASE_RETURN_STRING(WMI_MGMT_TX_COMP_TYPE_COMPLETE_OK);
2643 CASE_RETURN_STRING(WMI_MGMT_TX_COMP_TYPE_DISCARD);
2644 CASE_RETURN_STRING(WMI_MGMT_TX_COMP_TYPE_INSPECT);
2645 CASE_RETURN_STRING(WMI_MGMT_TX_COMP_TYPE_COMPLETE_NO_ACK);
2646 CASE_RETURN_STRING(WMI_MGMT_TX_COMP_TYPE_MAX);
2647 }
2648 }
2649
2650 #ifdef CONFIG_HL_SUPPORT
wma_mgmt_unmap_buf(tp_wma_handle wma_handle,qdf_nbuf_t buf)2651 static inline void wma_mgmt_unmap_buf(tp_wma_handle wma_handle, qdf_nbuf_t buf)
2652 {
2653 }
2654 #else
wma_mgmt_unmap_buf(tp_wma_handle wma_handle,qdf_nbuf_t buf)2655 static inline void wma_mgmt_unmap_buf(tp_wma_handle wma_handle, qdf_nbuf_t buf)
2656 {
2657 qdf_nbuf_unmap_single(wma_handle->qdf_dev, buf, QDF_DMA_TO_DEVICE);
2658 }
2659 #endif
2660
2661 #if defined(CONNECTIVITY_PKTLOG) || !defined(REMOVE_PKT_LOG)
2662 /**
2663 * wma_mgmt_qdf_status_map() - map MGMT Tx completion status to the
2664 * packet dump Tx status
2665 * @status: MGMT Tx completion status
2666 *
2667 * Return: packet dump tx_status enum
2668 */
2669 static inline enum qdf_dp_tx_rx_status
wma_mgmt_qdf_status_map(WMI_MGMT_TX_COMP_STATUS_TYPE status)2670 wma_mgmt_qdf_status_map(WMI_MGMT_TX_COMP_STATUS_TYPE status)
2671 {
2672 enum qdf_dp_tx_rx_status pktdump_status;
2673
2674 switch (status) {
2675 case WMI_MGMT_TX_COMP_TYPE_COMPLETE_OK:
2676 pktdump_status = QDF_TX_RX_STATUS_OK;
2677 break;
2678 case WMI_MGMT_TX_COMP_TYPE_DISCARD:
2679 pktdump_status = QDF_TX_RX_STATUS_DROP;
2680 break;
2681 case WMI_MGMT_TX_COMP_TYPE_COMPLETE_NO_ACK:
2682 pktdump_status = QDF_TX_RX_STATUS_NO_ACK;
2683 break;
2684 default:
2685 pktdump_status = QDF_TX_RX_STATUS_DROP;
2686 break;
2687 }
2688 return pktdump_status;
2689 }
2690
2691 /**
2692 * wma_mgmt_pktdump_tx_handler() - calls tx cb if CONNECTIVITY_PKTLOG
2693 * feature is enabled
2694 * @wma_handle: wma handle
2695 * @buf: nbuf
2696 * @vdev_id : vdev id
2697 * @status : status
2698 *
2699 * Return: none
2700 */
wma_mgmt_pktdump_tx_handler(tp_wma_handle wma_handle,qdf_nbuf_t buf,uint8_t vdev_id,uint32_t status)2701 static inline void wma_mgmt_pktdump_tx_handler(tp_wma_handle wma_handle,
2702 qdf_nbuf_t buf, uint8_t vdev_id,
2703 uint32_t status)
2704 {
2705 ol_txrx_pktdump_cb packetdump_cb;
2706 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
2707 enum qdf_dp_tx_rx_status pktdump_status;
2708
2709 packetdump_cb = wma_handle->wma_mgmt_tx_packetdump_cb;
2710 pktdump_status = wma_mgmt_qdf_status_map(status);
2711 if (packetdump_cb)
2712 packetdump_cb(soc, WMI_PDEV_ID_SOC, vdev_id,
2713 buf, pktdump_status, QDF_TX_MGMT_PKT);
2714 }
2715
2716 /**
2717 * wma_mgmt_pktdump_rx_handler() - calls rx cb if CONNECTIVITY_PKTLOG
2718 * feature is enabled
2719 * @mgmt_rx_params: mgmt rx params
2720 * @rx_pkt: cds packet
2721 * @wma_handle: wma handle
2722 * @mgt_type: management type
2723 * @mgt_subtype: management subtype
2724 *
2725 * Return: none
2726 */
wma_mgmt_pktdump_rx_handler(struct mgmt_rx_event_params * mgmt_rx_params,cds_pkt_t * rx_pkt,tp_wma_handle wma_handle,uint8_t mgt_type,uint8_t mgt_subtype)2727 static inline void wma_mgmt_pktdump_rx_handler(
2728 struct mgmt_rx_event_params *mgmt_rx_params,
2729 cds_pkt_t *rx_pkt, tp_wma_handle wma_handle,
2730 uint8_t mgt_type, uint8_t mgt_subtype)
2731 {
2732 ol_txrx_pktdump_cb packetdump_cb;
2733 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
2734
2735 packetdump_cb = wma_handle->wma_mgmt_rx_packetdump_cb;
2736 if ((mgt_type == IEEE80211_FC0_TYPE_MGT &&
2737 mgt_subtype != MGMT_SUBTYPE_BEACON) &&
2738 packetdump_cb)
2739 packetdump_cb(soc, mgmt_rx_params->pdev_id,
2740 rx_pkt->pkt_meta.session_id, rx_pkt->pkt_buf,
2741 QDF_TX_RX_STATUS_OK, QDF_RX_MGMT_PKT);
2742 }
2743
2744 #else
wma_mgmt_pktdump_tx_handler(tp_wma_handle wma_handle,qdf_nbuf_t buf,uint8_t vdev_id,uint32_t status)2745 static inline void wma_mgmt_pktdump_tx_handler(tp_wma_handle wma_handle,
2746 qdf_nbuf_t buf, uint8_t vdev_id,
2747 uint32_t status)
2748 {
2749 }
2750
wma_mgmt_pktdump_rx_handler(struct mgmt_rx_event_params * mgmt_rx_params,cds_pkt_t * rx_pkt,tp_wma_handle wma_handle,uint8_t mgt_type,uint8_t mgt_subtype)2751 static inline void wma_mgmt_pktdump_rx_handler(
2752 struct mgmt_rx_event_params *mgmt_rx_params,
2753 cds_pkt_t *rx_pkt, tp_wma_handle wma_handle,
2754 uint8_t mgt_type, uint8_t mgt_subtype)
2755 {
2756 }
2757 #endif
2758
2759 /**
2760 * wma_process_mgmt_tx_completion() - process mgmt completion
2761 * @wma_handle: wma handle
2762 * @desc_id: descriptor id
2763 * @status: status
2764 *
2765 * Return: 0 for success or error code
2766 */
wma_process_mgmt_tx_completion(tp_wma_handle wma_handle,uint32_t desc_id,uint32_t status)2767 static int wma_process_mgmt_tx_completion(tp_wma_handle wma_handle,
2768 uint32_t desc_id, uint32_t status)
2769 {
2770 struct wlan_objmgr_pdev *pdev;
2771 qdf_nbuf_t buf = NULL;
2772 QDF_STATUS ret;
2773 uint8_t vdev_id = 0;
2774 struct wmi_mgmt_params mgmt_params = {};
2775
2776 if (wma_validate_handle(wma_handle))
2777 return -EINVAL;
2778
2779 wma_debug("status: %s wmi_desc_id: %d",
2780 wma_get_status_str(status), desc_id);
2781
2782 pdev = wma_handle->pdev;
2783 if (!pdev) {
2784 wma_err("psoc ptr is NULL");
2785 return -EINVAL;
2786 }
2787
2788 buf = mgmt_txrx_get_nbuf(pdev, desc_id);
2789
2790
2791 if (buf)
2792 wma_mgmt_unmap_buf(wma_handle, buf);
2793
2794 vdev_id = mgmt_txrx_get_vdev_id(pdev, desc_id);
2795 mgmt_params.vdev_id = vdev_id;
2796
2797 wma_mgmt_pktdump_tx_handler(wma_handle, buf, vdev_id, status);
2798 ret = mgmt_txrx_tx_completion_handler(pdev, desc_id, status,
2799 &mgmt_params);
2800
2801 if (ret != QDF_STATUS_SUCCESS) {
2802 wma_err("Failed to process mgmt tx completion");
2803 return -EINVAL;
2804 }
2805
2806 return 0;
2807 }
2808
2809 /**
2810 * wma_extract_mgmt_offload_event_params() - Extract mgmt event params
2811 * @params: Management offload event params
2812 * @hdr: Management header to extract
2813 *
2814 * Return: None
2815 */
wma_extract_mgmt_offload_event_params(struct mgmt_offload_event_params * params,wmi_mgmt_hdr * hdr)2816 static void wma_extract_mgmt_offload_event_params(
2817 struct mgmt_offload_event_params *params,
2818 wmi_mgmt_hdr *hdr)
2819 {
2820 params->tsf_l32 = hdr->tsf_l32;
2821 params->chan_freq = hdr->chan_freq;
2822 params->rate_kbps = hdr->rate_kbps;
2823 params->rssi = hdr->rssi;
2824 params->buf_len = hdr->buf_len;
2825 params->tx_status = hdr->tx_status;
2826 params->tx_retry_cnt = hdr->tx_retry_cnt;
2827 }
2828
2829 /**
2830 * wma_mgmt_tx_completion_handler() - wma mgmt Tx completion event handler
2831 * @handle: wma handle
2832 * @cmpl_event_params: completion event handler data
2833 * @len: length of @cmpl_event_params
2834 *
2835 * Return: 0 on success; error number otherwise
2836 */
2837
wma_mgmt_tx_completion_handler(void * handle,uint8_t * cmpl_event_params,uint32_t len)2838 int wma_mgmt_tx_completion_handler(void *handle, uint8_t *cmpl_event_params,
2839 uint32_t len)
2840 {
2841 tp_wma_handle wma_handle = (tp_wma_handle)handle;
2842 WMI_MGMT_TX_COMPLETION_EVENTID_param_tlvs *param_buf;
2843 wmi_mgmt_tx_compl_event_fixed_param *cmpl_params;
2844
2845 param_buf = (WMI_MGMT_TX_COMPLETION_EVENTID_param_tlvs *)
2846 cmpl_event_params;
2847 if (!param_buf || !wma_handle) {
2848 wma_err("Invalid mgmt Tx completion event");
2849 return -EINVAL;
2850 }
2851 cmpl_params = param_buf->fixed_param;
2852
2853 if ((ucfg_pkt_capture_get_pktcap_mode(wma_handle->psoc) &
2854 PKT_CAPTURE_MODE_MGMT_ONLY) && param_buf->mgmt_hdr) {
2855 struct mgmt_offload_event_params params = {0};
2856
2857 wma_extract_mgmt_offload_event_params(
2858 &params,
2859 (wmi_mgmt_hdr *)param_buf->mgmt_hdr);
2860 ucfg_pkt_capture_mgmt_tx_completion(wma_handle->pdev,
2861 cmpl_params->desc_id,
2862 cmpl_params->status,
2863 &params);
2864 }
2865
2866 wma_process_mgmt_tx_completion(wma_handle, cmpl_params->desc_id,
2867 cmpl_params->status);
2868
2869 return 0;
2870 }
2871
2872 /**
2873 * wma_mgmt_tx_bundle_completion_handler() - mgmt bundle comp handler
2874 * @handle: wma handle
2875 * @buf: buffer
2876 * @len: length
2877 *
2878 * Return: 0 for success or error code
2879 */
wma_mgmt_tx_bundle_completion_handler(void * handle,uint8_t * buf,uint32_t len)2880 int wma_mgmt_tx_bundle_completion_handler(void *handle, uint8_t *buf,
2881 uint32_t len)
2882 {
2883 tp_wma_handle wma_handle = (tp_wma_handle)handle;
2884 WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID_param_tlvs *param_buf;
2885 wmi_mgmt_tx_compl_bundle_event_fixed_param *cmpl_params;
2886 uint32_t num_reports;
2887 uint32_t *desc_ids;
2888 uint32_t *status;
2889 uint32_t i, buf_len;
2890 bool excess_data = false;
2891
2892 param_buf = (WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID_param_tlvs *)buf;
2893 if (!param_buf || !wma_handle) {
2894 wma_err("Invalid mgmt Tx completion event");
2895 return -EINVAL;
2896 }
2897 cmpl_params = param_buf->fixed_param;
2898 num_reports = cmpl_params->num_reports;
2899 desc_ids = (uint32_t *)(param_buf->desc_ids);
2900 status = (uint32_t *)(param_buf->status);
2901
2902 /* buf contains num_reports * sizeof(uint32) bytes of desc_ids and
2903 * num_reports * sizeof(uint32) bytes of status, so
2904 * 2 * num_reports * sizeof(uint32) must not exceed WMI_SVC_MSG_MAX_SIZE
2905 */
2906 if (cmpl_params->num_reports > (WMI_SVC_MSG_MAX_SIZE /
2907 (2 * sizeof(uint32_t))))
2908 excess_data = true;
2909 else
2910 buf_len = cmpl_params->num_reports * (2 * sizeof(uint32_t));
2911
2912 if (excess_data || (sizeof(*cmpl_params) > (WMI_SVC_MSG_MAX_SIZE -
2913 buf_len))) {
2914 wma_err("excess wmi buffer: num_reports %d",
2915 cmpl_params->num_reports);
2916 return -EINVAL;
2917 }
2918
2919 if ((cmpl_params->num_reports > param_buf->num_desc_ids) ||
2920 (cmpl_params->num_reports > param_buf->num_status)) {
2921 wma_err("Invalid num_reports %d, num_desc_ids %d, num_status %d",
2922 cmpl_params->num_reports, param_buf->num_desc_ids,
2923 param_buf->num_status);
2924 return -EINVAL;
2925 }
2926
2927 for (i = 0; i < num_reports; i++) {
2928 if ((ucfg_pkt_capture_get_pktcap_mode(wma_handle->psoc) &
2929 PKT_CAPTURE_MODE_MGMT_ONLY) && param_buf->mgmt_hdr) {
2930 struct mgmt_offload_event_params params = {0};
2931
2932 wma_extract_mgmt_offload_event_params(
2933 &params,
2934 &((wmi_mgmt_hdr *)param_buf->mgmt_hdr)[i]);
2935 ucfg_pkt_capture_mgmt_tx_completion(
2936 wma_handle->pdev, desc_ids[i],
2937 status[i], &params);
2938 }
2939
2940 wma_process_mgmt_tx_completion(wma_handle,
2941 desc_ids[i], status[i]);
2942 }
2943 return 0;
2944 }
2945
2946 /**
2947 * wma_process_update_opmode() - process update VHT opmode cmd from UMAC
2948 * @wma_handle: wma handle
2949 * @update_vht_opmode: vht opmode
2950 *
2951 * Return: none
2952 */
wma_process_update_opmode(tp_wma_handle wma_handle,tUpdateVHTOpMode * update_vht_opmode)2953 void wma_process_update_opmode(tp_wma_handle wma_handle,
2954 tUpdateVHTOpMode *update_vht_opmode)
2955 {
2956 wmi_host_channel_width ch_width;
2957 uint8_t pdev_id;
2958 struct wlan_objmgr_peer *peer;
2959 struct wlan_objmgr_psoc *psoc = wma_handle->psoc;
2960 enum wlan_phymode peer_phymode;
2961 uint32_t fw_phymode;
2962 enum wlan_peer_type peer_type;
2963
2964 pdev_id = wlan_objmgr_pdev_get_pdev_id(wma_handle->pdev);
2965 peer = wlan_objmgr_get_peer(psoc, pdev_id,
2966 update_vht_opmode->peer_mac,
2967 WLAN_LEGACY_WMA_ID);
2968 if (!peer) {
2969 wma_err("peer object invalid");
2970 return;
2971 }
2972
2973 peer_type = wlan_peer_get_peer_type(peer);
2974 if (peer_type == WLAN_PEER_SELF) {
2975 wma_err("self peer wrongly used");
2976 wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
2977 return;
2978 }
2979
2980 wlan_peer_obj_lock(peer);
2981 peer_phymode = wlan_peer_get_phymode(peer);
2982 wlan_peer_obj_unlock(peer);
2983 wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
2984
2985 fw_phymode = wmi_host_to_fw_phymode(peer_phymode);
2986
2987 ch_width = wmi_get_ch_width_from_phy_mode(wma_handle->wmi_handle,
2988 fw_phymode);
2989 wma_debug("ch_width: %d, fw phymode: %d peer_phymode: %d, op_mode: %d",
2990 ch_width, fw_phymode, peer_phymode,
2991 update_vht_opmode->opMode);
2992
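/*
 * The new operating mode must not exceed the channel width implied
 * by the peer's association phymode; reject wider requests.
 */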
2993 if (ch_width < update_vht_opmode->opMode) {
2994 wma_err("Invalid peer bw update %d, self bw %d",
2995 update_vht_opmode->opMode, ch_width);
2996 return;
2997 }
2998
2999 wma_set_peer_param(wma_handle, update_vht_opmode->peer_mac,
3000 WMI_HOST_PEER_CHWIDTH, update_vht_opmode->opMode,
3001 update_vht_opmode->smesessionId);
3002
3003 wma_set_peer_param(wma_handle, update_vht_opmode->peer_mac,
3004 WMI_HOST_PEER_PHYMODE,
3005 fw_phymode, update_vht_opmode->smesessionId);
3006 }
3007
3008 /**
3009 * wma_process_update_rx_nss() - process update RX NSS cmd from UMAC
3010 * @wma_handle: wma handle
3011 * @update_rx_nss: rx nss value
3012 *
3013 * Return: none
3014 */
wma_process_update_rx_nss(tp_wma_handle wma_handle,tUpdateRxNss * update_rx_nss)3015 void wma_process_update_rx_nss(tp_wma_handle wma_handle,
3016 tUpdateRxNss *update_rx_nss)
3017 {
3018 struct target_psoc_info *tgt_hdl;
3019 struct wma_txrx_node *intr =
3020 &wma_handle->interfaces[update_rx_nss->smesessionId];
3021 int rx_nss = update_rx_nss->rxNss;
3022 int num_rf_chains;
3023
3024 tgt_hdl = wlan_psoc_get_tgt_if_handle(wma_handle->psoc);
3025 if (!tgt_hdl) {
3026 wma_err("target psoc info is NULL");
3027 return;
3028 }
3029
3030 num_rf_chains = target_if_get_num_rf_chains(tgt_hdl);
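/* Clamp the requested Rx NSS to the RF chain count and the host limit */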
3031 if (rx_nss > num_rf_chains || rx_nss > WMA_MAX_NSS)
3032 rx_nss = QDF_MIN(num_rf_chains, WMA_MAX_NSS);
3033
3034 intr->nss = (uint8_t)rx_nss;
3035 update_rx_nss->rxNss = (uint32_t)rx_nss;
3036
3037 wma_debug("Rx Nss = %d", update_rx_nss->rxNss);
3038
3039 wma_set_peer_param(wma_handle, update_rx_nss->peer_mac,
3040 WMI_HOST_PEER_NSS, update_rx_nss->rxNss,
3041 update_rx_nss->smesessionId);
3042 }
3043
3044 /**
3045 * wma_process_update_membership() - process update group membership cmd
3046 * @wma_handle: wma handle
3047 * @membership: group membership info
3048 *
3049 * Return: none
3050 */
wma_process_update_membership(tp_wma_handle wma_handle,tUpdateMembership * membership)3051 void wma_process_update_membership(tp_wma_handle wma_handle,
3052 tUpdateMembership *membership)
3053 {
3054 wma_debug("membership = %x ", membership->membership);
3055
3056 wma_set_peer_param(wma_handle, membership->peer_mac,
3057 WMI_HOST_PEER_MEMBERSHIP, membership->membership,
3058 membership->smesessionId);
3059 }
3060
3061 /**
3062 * wma_process_update_userpos() - process update user pos cmd from UMAC
3063 * @wma_handle: wma handle
3064 * @userpos: user pos value
3065 *
3066 * Return: none
3067 */
wma_process_update_userpos(tp_wma_handle wma_handle,tUpdateUserPos * userpos)3068 void wma_process_update_userpos(tp_wma_handle wma_handle,
3069 tUpdateUserPos *userpos)
3070 {
3071 wma_debug("userPos = %x ", userpos->userPos);
3072
3073 wma_set_peer_param(wma_handle, userpos->peer_mac,
3074 WMI_HOST_PEER_USERPOS, userpos->userPos,
3075 userpos->smesessionId);
3076
3077 /* Now that membership/userpos is updated in fw,
3078 * enable GID PPS.
3079 */
3080 wma_set_ppsconfig(userpos->smesessionId, WMA_VHT_PPS_GID_MATCH, 1);
3081
3082 }
3083
wma_set_cts2self_for_p2p_go(void * wma_handle,uint32_t cts2self_for_p2p_go)3084 QDF_STATUS wma_set_cts2self_for_p2p_go(void *wma_handle,
3085 uint32_t cts2self_for_p2p_go)
3086 {
3087 int32_t ret;
3088 tp_wma_handle wma = (tp_wma_handle)wma_handle;
3089 struct pdev_params pdevparam = {};
3090
3091 pdevparam.param_id = wmi_pdev_param_cts2self_for_p2p_go_config;
3092 pdevparam.param_value = cts2self_for_p2p_go;
3093
3094 ret = wmi_unified_pdev_param_send(wma->wmi_handle,
3095 &pdevparam,
3096 WMA_WILDCARD_PDEV_ID);
3097 if (ret) {
3098 wma_err("Fail to Set CTS2SELF for p2p GO %d",
3099 cts2self_for_p2p_go);
3100 return QDF_STATUS_E_FAILURE;
3101 }
3102
3103 wma_nofl_debug("Successfully Set CTS2SELF for p2p GO %d",
3104 cts2self_for_p2p_go);
3105
3106 return QDF_STATUS_SUCCESS;
3107 }
3108
3109
3110 /**
3111 * wma_set_htconfig() - set ht config parameters to target
3112 * @vdev_id: vdev id
3113 * @ht_capab: ht capability
3114 * @value: value of ht param
3115 *
3116 * Return: QDF status
3117 */
wma_set_htconfig(uint8_t vdev_id,uint16_t ht_capab,int value)3118 QDF_STATUS wma_set_htconfig(uint8_t vdev_id, uint16_t ht_capab, int value)
3119 {
3120 tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
3121 QDF_STATUS ret = QDF_STATUS_E_FAILURE;
3122
3123 if (!wma)
3124 return QDF_STATUS_E_INVAL;
3125
3126 switch (ht_capab) {
3127 case WNI_CFG_HT_CAP_INFO_ADVANCE_CODING:
3128 ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
3129 wmi_vdev_param_ldpc,
3130 value);
3131 break;
3132 case WNI_CFG_HT_CAP_INFO_TX_STBC:
3133 ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
3134 wmi_vdev_param_tx_stbc,
3135 value);
3136 break;
3137 case WNI_CFG_HT_CAP_INFO_RX_STBC:
3138 ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
3139 wmi_vdev_param_rx_stbc,
3140 value);
3141 break;
3142 case WNI_CFG_HT_CAP_INFO_SHORT_GI_20MHZ:
3143 case WNI_CFG_HT_CAP_INFO_SHORT_GI_40MHZ:
3144 wma_err("ht_capab = %d, value = %d", ht_capab,
3145 value);
3146 ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
3147 wmi_vdev_param_sgi, value);
3148 if (ret == QDF_STATUS_SUCCESS)
3149 wma->interfaces[vdev_id].config.shortgi = value;
3150 break;
3151 default:
3152 wma_err("INVALID HT CONFIG");
3153 }
3154
3155 return ret;
3156 }
3157
3158 /**
3159 * wma_extract_ccmp_pn() - extract 6 byte PN from the CCMP header
3160 * @ccmp_ptr: CCMP header
3161 *
3162 * Return: PN extracted from header.
3163 */
wma_extract_ccmp_pn(uint8_t * ccmp_ptr)3164 static uint64_t wma_extract_ccmp_pn(uint8_t *ccmp_ptr)
3165 {
3166 uint8_t rsvd, key, pn[6];
3167 uint64_t new_pn;
3168
3169 /*
3170 * +-----+-----+------+----------+-----+-----+-----+-----+
3171 * | PN0 | PN1 | rsvd | rsvd/key | PN2 | PN3 | PN4 | PN5 |
3172 * +-----+-----+------+----------+-----+-----+-----+-----+
3173 * CCMP Header Format
3174 */
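/*
 * Example (hypothetical header bytes): {0x05, 0x00, rsvd, key, 0x00,
 * 0x00, 0x00, 0x00} decode to PN = 0x000000000005.
 */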
3175
3176 /* Extract individual bytes */
3177 pn[0] = (uint8_t) *ccmp_ptr;
3178 pn[1] = (uint8_t) *(ccmp_ptr + 1);
3179 rsvd = (uint8_t) *(ccmp_ptr + 2);
3180 key = (uint8_t) *(ccmp_ptr + 3);
3181 pn[2] = (uint8_t) *(ccmp_ptr + 4);
3182 pn[3] = (uint8_t) *(ccmp_ptr + 5);
3183 pn[4] = (uint8_t) *(ccmp_ptr + 6);
3184 pn[5] = (uint8_t) *(ccmp_ptr + 7);
3185
3186 /* Form 6 byte PN with 6 individual bytes of PN */
3187 new_pn = ((uint64_t) pn[5] << 40) |
3188 ((uint64_t) pn[4] << 32) |
3189 ((uint64_t) pn[3] << 24) |
3190 ((uint64_t) pn[2] << 16) |
3191 ((uint64_t) pn[1] << 8) | ((uint64_t) pn[0] << 0);
3192
3193 return new_pn;
3194 }
3195
3196 /**
3197 * wma_is_ccmp_pn_replay_attack() - detect replay attacking using PN in CCMP
3198 * @wma: wma context
3199 * @wh: 802.11 frame header
3200 * @ccmp_ptr: CCMP frame header
3201 *
3202 * Return: true/false
3203 */
3204 static bool
wma_is_ccmp_pn_replay_attack(tp_wma_handle wma,struct ieee80211_frame * wh,uint8_t * ccmp_ptr)3205 wma_is_ccmp_pn_replay_attack(tp_wma_handle wma, struct ieee80211_frame *wh,
3206 uint8_t *ccmp_ptr)
3207 {
3208 uint64_t new_pn;
3209 bool ret = false;
3210 struct peer_mlme_priv_obj *peer_priv;
3211 struct wlan_objmgr_peer *peer;
3212
3213 new_pn = wma_extract_ccmp_pn(ccmp_ptr);
3214
3215 peer = wlan_objmgr_get_peer_by_mac(wma->psoc, wh->i_addr2,
3216 WLAN_LEGACY_WMA_ID);
3217 if (!peer)
3218 return ret;
3219
3220 peer_priv = wlan_objmgr_peer_get_comp_private_obj(peer,
3221 WLAN_UMAC_COMP_MLME);
3222 if (!peer_priv) {
3223 wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
3224 return ret;
3225 }
3226
3227 if (peer_priv->last_pn_valid) {
3228 if (new_pn > peer_priv->last_pn) {
3229 peer_priv->last_pn = new_pn;
3230 } else {
3231 wma_err_rl("PN Replay attack detected");
3232 /* per 11W amendment, keeping track of replay attacks */
3233 peer_priv->rmf_pn_replays += 1;
3234 ret = true;
3235 }
3236 } else {
3237 peer_priv->last_pn_valid = 1;
3238 peer_priv->last_pn = new_pn;
3239 }
3240
3241 wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
3242
3243 return ret;
3244 }
3245
3246 /**
3247 * wma_process_bip() - process mmie in rmf frame
3248 * @wma_handle: wma handle
3249 * @iface: txrx node
3250 * @wh: 80211 frame
3251 * @wbuf: Buffer
3252 *
3253 * Return: 0 for success or error code
3254 */
3255 static
wma_process_bip(tp_wma_handle wma_handle,struct wma_txrx_node * iface,struct ieee80211_frame * wh,qdf_nbuf_t wbuf)3256 int wma_process_bip(tp_wma_handle wma_handle, struct wma_txrx_node *iface,
3257 struct ieee80211_frame *wh, qdf_nbuf_t wbuf)
3258 {
3259 uint16_t mmie_size;
3260 uint8_t *efrm;
3261 int32_t mgmtcipherset;
3262 enum wlan_crypto_cipher_type key_cipher;
3263
3264 efrm = qdf_nbuf_data(wbuf) + qdf_nbuf_len(wbuf);
3265
3266 mgmtcipherset = wlan_crypto_get_param(iface->vdev,
3267 WLAN_CRYPTO_PARAM_MGMT_CIPHER);
3268 if (mgmtcipherset <= 0) {
3269 wma_err("Invalid key cipher %d", mgmtcipherset);
3270 return -EINVAL;
3271 }
3272
3273 if (mgmtcipherset & (1 << WLAN_CRYPTO_CIPHER_AES_CMAC)) {
3274 key_cipher = WLAN_CRYPTO_CIPHER_AES_CMAC;
3275 mmie_size = cds_get_mmie_size();
3276 } else if (mgmtcipherset & (1 << WLAN_CRYPTO_CIPHER_AES_GMAC)) {
3277 key_cipher = WLAN_CRYPTO_CIPHER_AES_GMAC;
3278 mmie_size = cds_get_gmac_mmie_size();
3279 } else if (mgmtcipherset & (1 << WLAN_CRYPTO_CIPHER_AES_GMAC_256)) {
3280 key_cipher = WLAN_CRYPTO_CIPHER_AES_GMAC_256;
3281 mmie_size = cds_get_gmac_mmie_size();
3282 } else {
3283 wma_err("Invalid key cipher %d", mgmtcipherset);
3284 return -EINVAL;
3285 }
3286
3287 /* Drop the frame if it is too short to hold the header and MMIE */
3288 if (efrm - (uint8_t *)wh < sizeof(*wh) + mmie_size) {
3289 wma_err("Invalid frame length");
3290 return -EINVAL;
3291 }
3292
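/*
 * If the matching PMF/GMAC offload service is enabled, firmware has
 * already validated the MMIE and the host only strips it; otherwise
 * validate the MMIE here before accepting the frame.
 */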
3293 switch (key_cipher) {
3294 case WLAN_CRYPTO_CIPHER_AES_CMAC:
3295 if (!wmi_service_enabled(wma_handle->wmi_handle,
3296 wmi_service_sta_pmf_offload)) {
3297 if (!wlan_crypto_is_mmie_valid(iface->vdev,
3298 (uint8_t *)wh, efrm)) {
3299 wma_debug("BC/MC MIC error or MMIE not present, dropping the frame");
3300 return -EINVAL;
3301 }
3302 }
3303 break;
3304 case WLAN_CRYPTO_CIPHER_AES_GMAC:
3305 case WLAN_CRYPTO_CIPHER_AES_GMAC_256:
3306 if (!wmi_service_enabled(wma_handle->wmi_handle,
3307 wmi_service_gmac_offload_support)) {
3308 if (!wlan_crypto_is_mmie_valid(iface->vdev,
3309 (uint8_t *)wh, efrm)) {
3310 wma_debug("BC/MC GMAC MIC error or MMIE not present, dropping the frame");
3311 return -EINVAL;
3312 }
3313 }
3314 break;
3315 default:
3316 wma_err("Invalid key_type %d", key_cipher);
3317 return -EINVAL;
3318 }
3319
3320 qdf_nbuf_trim_tail(wbuf, mmie_size);
3321
3322 return 0;
3323 }
3324
3325 /**
3326 * wma_process_rmf_frame() - process rmf frame
3327 * @wma_handle: wma handle
3328 * @iface: txrx node
3329 * @wh: 80211 frame
3330 * @rx_pkt: rx packet
3331 * @wbuf: Buffer
3332 *
3333 * Return: 0 for success or error code
3334 */
3335 static
wma_process_rmf_frame(tp_wma_handle wma_handle,struct wma_txrx_node * iface,struct ieee80211_frame * wh,cds_pkt_t * rx_pkt,qdf_nbuf_t wbuf)3336 int wma_process_rmf_frame(tp_wma_handle wma_handle,
3337 struct wma_txrx_node *iface,
3338 struct ieee80211_frame *wh,
3339 cds_pkt_t *rx_pkt,
3340 qdf_nbuf_t wbuf)
3341 {
3342 uint8_t *orig_hdr;
3343 uint8_t *ccmp;
3344 uint8_t mic_len, hdr_len, pdev_id;
3345 QDF_STATUS status;
3346
3347 if ((wh)->i_fc[1] & IEEE80211_FC1_WEP) {
3348 if (QDF_IS_ADDR_BROADCAST(wh->i_addr1) ||
3349 IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3350 wma_err("Encrypted BC/MC frame dropping the frame");
3351 cds_pkt_return_packet(rx_pkt);
3352 return -EINVAL;
3353 }
3354
3355 if (iface->type == WMI_VDEV_TYPE_NDI ||
3356 iface->type == WMI_VDEV_TYPE_NAN) {
3357 hdr_len = IEEE80211_CCMP_HEADERLEN;
3358 mic_len = IEEE80211_CCMP_MICLEN;
3359 } else {
3360 pdev_id =
3361 wlan_objmgr_pdev_get_pdev_id(wma_handle->pdev);
3362 status = mlme_get_peer_mic_len(wma_handle->psoc,
3363 pdev_id, wh->i_addr2,
3364 &mic_len, &hdr_len);
3365 if (QDF_IS_STATUS_ERROR(status)) {
3366 wma_err("Failed to get mic hdr and length");
3367 cds_pkt_return_packet(rx_pkt);
3368 return -EINVAL;
3369 }
3370 }
3371
3372 if (qdf_nbuf_len(wbuf) < (sizeof(*wh) + hdr_len + mic_len)) {
3373 wma_err("Buffer length less than expected %d",
3374 (int)qdf_nbuf_len(wbuf));
3375 cds_pkt_return_packet(rx_pkt);
3376 return -EINVAL;
3377 }
3378
3379 orig_hdr = (uint8_t *) qdf_nbuf_data(wbuf);
3380 /* Pointer to head of CCMP header */
3381 ccmp = orig_hdr + sizeof(*wh);
3382 if (wma_is_ccmp_pn_replay_attack(wma_handle, wh, ccmp)) {
3383 wma_err_rl("Dropping the frame");
3384 cds_pkt_return_packet(rx_pkt);
3385 return -EINVAL;
3386 }
3387
3388 /* Strip privacy headers (and trailer)
3389 * for a received frame
3390 */
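/*
 * Move the MAC header forward over the CCMP header, then pull
 * hdr_len bytes from the head and trim mic_len bytes from the tail
 * so the buffer holds a contiguous, decapsulated frame.
 */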
3391 qdf_mem_move(orig_hdr +
3392 hdr_len, wh,
3393 sizeof(*wh));
3394 qdf_nbuf_pull_head(wbuf,
3395 hdr_len);
3396 qdf_nbuf_trim_tail(wbuf, mic_len);
3397 /*
3398 * CCMP header has been pulled off
3399 * reinitialize the start pointer of mac header
3400 * to avoid accessing incorrect address
3401 */
3402 wh = (struct ieee80211_frame *) qdf_nbuf_data(wbuf);
3403 rx_pkt->pkt_meta.mpdu_hdr_ptr =
3404 qdf_nbuf_data(wbuf);
3405 rx_pkt->pkt_meta.mpdu_len = qdf_nbuf_len(wbuf);
3406 rx_pkt->pkt_buf = wbuf;
3407 if (rx_pkt->pkt_meta.mpdu_len >=
3408 rx_pkt->pkt_meta.mpdu_hdr_len) {
3409 rx_pkt->pkt_meta.mpdu_data_len =
3410 rx_pkt->pkt_meta.mpdu_len -
3411 rx_pkt->pkt_meta.mpdu_hdr_len;
3412 } else {
3413 wma_err("mpdu len %d less than hdr %d, dropping frame",
3414 rx_pkt->pkt_meta.mpdu_len,
3415 rx_pkt->pkt_meta.mpdu_hdr_len);
3416 cds_pkt_return_packet(rx_pkt);
3417 return -EINVAL;
3418 }
3419
3420 if (rx_pkt->pkt_meta.mpdu_data_len > MAX_MGMT_MPDU_LEN) {
3421 wma_err("Data Len %d greater than max, dropping frame",
3422 rx_pkt->pkt_meta.mpdu_data_len);
3423 cds_pkt_return_packet(rx_pkt);
3424 return -EINVAL;
3425 }
3426 rx_pkt->pkt_meta.mpdu_data_ptr =
3427 rx_pkt->pkt_meta.mpdu_hdr_ptr +
3428 rx_pkt->pkt_meta.mpdu_hdr_len;
3429 wma_debug("BSSID: "QDF_MAC_ADDR_FMT" tsf_delta: %u",
3430 QDF_MAC_ADDR_REF(wh->i_addr3),
3431 rx_pkt->pkt_meta.tsf_delta);
3432 } else {
3433 if (QDF_IS_ADDR_BROADCAST(wh->i_addr1) ||
3434 IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3435 if (0 != wma_process_bip(wma_handle, iface, wh, wbuf)) {
3436 cds_pkt_return_packet(rx_pkt);
3437 return -EINVAL;
3438 }
3439 } else {
3440 wma_err_rl("Rx unprotected unicast mgmt frame");
3441 rx_pkt->pkt_meta.dpuFeedback =
3442 DPU_FEEDBACK_UNPROTECTED_ERROR;
3443 }
3444 }
3445 return 0;
3446 }
3447
3448 /**
3449 * wma_get_peer_pmf_status() - Get the PMF capability of peer
3450 * @wma: wma handle
3451 * @peer_mac: peer mac addr
3452 *
3453 * Return: True if PMF is enabled, false otherwise.
3454 */
3455 static bool
wma_get_peer_pmf_status(tp_wma_handle wma,uint8_t * peer_mac)3456 wma_get_peer_pmf_status(tp_wma_handle wma, uint8_t *peer_mac)
3457 {
3458 struct wlan_objmgr_peer *peer;
3459 bool is_pmf_enabled;
3460
3461 if (!peer_mac) {
3462 wma_err("peer_mac is NULL");
3463 return false;
3464 }
3465
3466 peer = wlan_objmgr_get_peer(wma->psoc,
3467 wlan_objmgr_pdev_get_pdev_id(wma->pdev),
3468 peer_mac, WLAN_LEGACY_WMA_ID);
3469 if (!peer) {
3470 wma_debug("Peer of peer_mac " QDF_MAC_ADDR_FMT " not found",
3471 QDF_MAC_ADDR_REF(peer_mac));
3472 return false;
3473 }
3474 is_pmf_enabled = mlme_get_peer_pmf_status(peer);
3475 wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
3476 wma_nofl_debug("get is_pmf_enabled %d for "QDF_MAC_ADDR_FMT,
3477 is_pmf_enabled, QDF_MAC_ADDR_REF(peer_mac));
3478
3479 return is_pmf_enabled;
3480 }
3481
3482 /**
3483 * wma_check_and_process_rmf_frame() - Process the frame if it is of rmf type
3484 * @wma_handle: wma handle
3485 * @vdev_id: vdev id
3486 * @wh: double pointer to 802.11 frame header which will be updated if the
3487 * frame is of rmf type.
3488 * @rx_pkt: rx packet
3489 * @buf: Buffer
3490 *
3491 * Process the frame as an RMF frame only if both the DUT and the peer
3492 * are PMF capable.
 *
3493 * Return: 0 for success or error code
3494 */
3495 static int
3496 wma_check_and_process_rmf_frame(tp_wma_handle wma_handle,
3497 uint8_t vdev_id,
3498 struct ieee80211_frame **wh,
3499 cds_pkt_t *rx_pkt,
3500 qdf_nbuf_t buf)
3501 {
3502 int status;
3503 struct wma_txrx_node *iface;
3504 struct ieee80211_frame *hdr = *wh;
3505
3506 iface = &(wma_handle->interfaces[vdev_id]);
3507 if ((iface->type != WMI_VDEV_TYPE_NDI &&
3508 iface->type != WMI_VDEV_TYPE_NAN) && !iface->rmfEnabled)
3509 return 0;
3510
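/*
 * Treat the frame as robust when it is group or broadcast addressed,
 * when the transmitting peer is PMF capable, or when this is an
 * NDI/NAN vdev and the Protected Frame bit is set.
 */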
3511 if (qdf_is_macaddr_group((struct qdf_mac_addr *)(hdr->i_addr1)) ||
3512 qdf_is_macaddr_broadcast((struct qdf_mac_addr *)(hdr->i_addr1)) ||
3513 wma_get_peer_pmf_status(wma_handle, hdr->i_addr2) ||
3514 ((iface->type == WMI_VDEV_TYPE_NDI ||
3515 iface->type == WMI_VDEV_TYPE_NAN) &&
3516 (hdr->i_fc[1] & IEEE80211_FC1_WEP))) {
3517 status = wma_process_rmf_frame(wma_handle, iface, hdr,
3518 rx_pkt, buf);
3519 if (status)
3520 return status;
3521 /*
3522 * The CCMP header might have been pulled off; reinitialize the
3523 * start pointer of the MAC header.
3524 */
3525 *wh = (struct ieee80211_frame *)qdf_nbuf_data(buf);
3526 }
3527
3528 return 0;
3529 }
3530
3531 /**
3532 * wma_is_pkt_drop_candidate() - check if the mgmt frame should be dropped
3533 * @wma_handle: wma handle
3534 * @peer_addr: peer MAC address
3535 * @bssid: BSSID Address
3536 * @subtype: Management frame subtype
3537 *
3538 * This function is used to decide whether a particular management frame
3539 * should be dropped to prevent a DoS attack; currently beacons from a
 * NAN cluster are dropped.
3540 *
3541 * Return: true if the packet should be dropped and false otherwise
3542 */
3543 static bool wma_is_pkt_drop_candidate(tp_wma_handle wma_handle,
3544 uint8_t *peer_addr, uint8_t *bssid,
3545 uint8_t subtype)
3546 {
3547 bool should_drop = false;
3548 uint8_t nan_addr[] = {0x50, 0x6F, 0x9A, 0x01, 0x00, 0x00};
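/* NAN cluster IDs take the form 50:6F:9A:01:xx:xx (WFA OUI + NAN type) */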
3549
3550 /* Drop the beacons from NAN device */
3551 if ((subtype == MGMT_SUBTYPE_BEACON) &&
3552 (!qdf_mem_cmp(nan_addr, bssid, NAN_CLUSTER_ID_BYTES))) {
3553 should_drop = true;
3554 goto end;
3555 }
3556 end:
3557 return should_drop;
3558 }
3559
3560 #define RATE_LIMIT 16
3561
3562 int wma_form_rx_packet(qdf_nbuf_t buf,
3563 struct mgmt_rx_event_params *mgmt_rx_params,
3564 cds_pkt_t *rx_pkt)
3565 {
3566 uint8_t vdev_id = WMA_INVALID_VDEV_ID;
3567 struct ieee80211_frame *wh;
3568 uint8_t mgt_type, mgt_subtype;
3569 int status;
3570 tp_wma_handle wma_handle = (tp_wma_handle)
3571 cds_get_context(QDF_MODULE_ID_WMA);
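/*
 * The limit_prints_* counters start at RATE_LIMIT - 1 so the first
 * occurrence of each error is logged immediately; after that only one
 * in every RATE_LIMIT occurrences is logged.
 */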
3572 static uint8_t limit_prints_invalid_len = RATE_LIMIT - 1;
3573 static uint8_t limit_prints_load_unload = RATE_LIMIT - 1;
3574 static uint8_t limit_prints_recovery = RATE_LIMIT - 1;
3575
3576 if (!wma_handle) {
3577 qdf_nbuf_free(buf);
3578 qdf_mem_free(rx_pkt);
3579 return -EINVAL;
3580 }
3581
3582 if (!mgmt_rx_params) {
3583 limit_prints_invalid_len++;
3584 if (limit_prints_invalid_len == RATE_LIMIT) {
3585 wma_debug("mgmt rx params is NULL");
3586 limit_prints_invalid_len = 0;
3587 }
3588 qdf_nbuf_free(buf);
3589 qdf_mem_free(rx_pkt);
3590 return -EINVAL;
3591 }
3592
3593 if (cds_is_load_or_unload_in_progress()) {
3594 limit_prints_load_unload++;
3595 if (limit_prints_load_unload == RATE_LIMIT) {
3596 wma_debug("Load/Unload in progress");
3597 limit_prints_load_unload = 0;
3598 }
3599 qdf_nbuf_free(buf);
3600 qdf_mem_free(rx_pkt);
3601 return -EINVAL;
3602 }
3603
3604 if (cds_is_driver_recovering()) {
3605 limit_prints_recovery++;
3606 if (limit_prints_recovery == RATE_LIMIT) {
3607 wma_debug("Recovery in progress");
3608 limit_prints_recovery = 0;
3609 }
3610 qdf_nbuf_free(buf);
3611 qdf_mem_free(rx_pkt);
3612 return -EINVAL;
3613 }
3614
3615 if (cds_is_driver_in_bad_state()) {
3616 limit_prints_recovery++;
3617 if (limit_prints_recovery == RATE_LIMIT) {
3618 wma_debug("Driver in bad state");
3619 limit_prints_recovery = 0;
3620 }
3621 qdf_nbuf_free(buf);
3622 qdf_mem_free(rx_pkt);
3623 return -EINVAL;
3624 }
3625
3626 /*
3627 * Fill in meta information needed by pe/lim
3628 * TODO: Try to maintain rx metainfo as part of skb->data.
3629 */
3630 rx_pkt->pkt_meta.frequency = mgmt_rx_params->chan_freq;
3631 rx_pkt->pkt_meta.scan_src = mgmt_rx_params->flags;
3632
3633 /*
3634 * Derive the rssi value from the current snr value
3635 * using the standard noise floor of -96 dBm.
3636 */
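/* For example, an SNR of 30 dB yields an RSSI of 30 + (-96) = -66 dBm. */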
3637 rx_pkt->pkt_meta.rssi = mgmt_rx_params->snr +
3638 WMA_NOISE_FLOOR_DBM_DEFAULT;
3639 rx_pkt->pkt_meta.snr = mgmt_rx_params->snr;
3640
3641 /* If absolute rssi is available from firmware, use it */
3642 if (mgmt_rx_params->rssi != 0)
3643 rx_pkt->pkt_meta.rssi_raw = mgmt_rx_params->rssi;
3644 else
3645 rx_pkt->pkt_meta.rssi_raw = rx_pkt->pkt_meta.rssi;
3646
3648 /*
3649 * FIXME: Assigning the local timestamp since the hw timestamp is not
3650 * available. Need to check whether pe/lim really uses this data.
3651 */
3652 rx_pkt->pkt_meta.timestamp = (uint32_t) jiffies;
3653 rx_pkt->pkt_meta.mpdu_hdr_len = sizeof(struct ieee80211_frame);
3654 rx_pkt->pkt_meta.mpdu_len = mgmt_rx_params->buf_len;
3655
3656 /*
3657 * The buf_len should be at least the 802.11 header length.
3658 */
3659 if (mgmt_rx_params->buf_len < rx_pkt->pkt_meta.mpdu_hdr_len) {
3660 wma_err("MPDU Len %d lesser than header len %d",
3661 mgmt_rx_params->buf_len,
3662 rx_pkt->pkt_meta.mpdu_hdr_len);
3663 qdf_nbuf_free(buf);
3664 qdf_mem_free(rx_pkt);
3665 return -EINVAL;
3666 }
3667
3668 rx_pkt->pkt_meta.mpdu_data_len = mgmt_rx_params->buf_len -
3669 rx_pkt->pkt_meta.mpdu_hdr_len;
3670
3671 rx_pkt->pkt_meta.roamCandidateInd = 0;
3672
3673 wh = (struct ieee80211_frame *)qdf_nbuf_data(buf);
3674
3675 /*
3676 * If the mpdu_data_len is greater than Max (2k), drop the frame
3677 */
3678 if (rx_pkt->pkt_meta.mpdu_data_len > MAX_MGMT_MPDU_LEN) {
3679 wma_err("Data Len %d greater than max, dropping frame from "QDF_MAC_ADDR_FMT,
3680 rx_pkt->pkt_meta.mpdu_data_len,
3681 QDF_MAC_ADDR_REF(wh->i_addr3));
3682 qdf_nbuf_free(buf);
3683 qdf_mem_free(rx_pkt);
3684 return -EINVAL;
3685 }
3686
3687 rx_pkt->pkt_meta.mpdu_hdr_ptr = qdf_nbuf_data(buf);
3688 rx_pkt->pkt_meta.mpdu_data_ptr = rx_pkt->pkt_meta.mpdu_hdr_ptr +
3689 rx_pkt->pkt_meta.mpdu_hdr_len;
3690 rx_pkt->pkt_meta.tsf_delta = mgmt_rx_params->tsf_delta;
3691 rx_pkt->pkt_buf = buf;
3692 rx_pkt->pkt_meta.pkt_qdf_buf = buf;
3693
3694 /* If it is a beacon/probe response, save it for future use */
3695 mgt_type = (wh)->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3696 mgt_subtype = (wh)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3697
3698 if (mgt_type == IEEE80211_FC0_TYPE_MGT &&
3699 (mgt_subtype == MGMT_SUBTYPE_DISASSOC ||
3700 mgt_subtype == MGMT_SUBTYPE_DEAUTH ||
3701 mgt_subtype == MGMT_SUBTYPE_ACTION)) {
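/*
 * Find the owning vdev: first by BSSID (addr3), then by the receiver
 * address (addr1); for action frames also try the NAN cluster BSSID so
 * that NAN action frames get RMF processing.
 */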
3702 if (wma_find_vdev_id_by_bssid(wma_handle, wh->i_addr3,
3703 &vdev_id) == QDF_STATUS_SUCCESS) {
3704 status = wma_check_and_process_rmf_frame(wma_handle,
3705 vdev_id,
3706 &wh,
3707 rx_pkt,
3708 buf);
3709 if (status)
3710 return status;
3711 } else if (wma_find_vdev_id_by_addr(wma_handle, wh->i_addr1,
3712 &vdev_id) == QDF_STATUS_SUCCESS) {
3713 status = wma_check_and_process_rmf_frame(wma_handle,
3714 vdev_id,
3715 &wh,
3716 rx_pkt,
3717 buf);
3718 if (status)
3719 return status;
3720 } else if (mgt_subtype == MGMT_SUBTYPE_ACTION) {
3721 /* NAN Action frame */
3722 vdev_id = wlan_nan_get_vdev_id_from_bssid(
3723 wma_handle->pdev,
3724 wh->i_addr3,
3725 WLAN_ACTION_OUI_ID);
3726
3727 if (vdev_id != WMA_INVALID_VDEV_ID) {
3728 status = wma_check_and_process_rmf_frame(
3729 wma_handle,
3730 vdev_id, &wh,
3731 rx_pkt, buf);
3732 if (status)
3733 return status;
3734 }
3735 }
3736 }
3737
3738 rx_pkt->pkt_meta.session_id =
3739 (vdev_id == WMA_INVALID_VDEV_ID ? 0 : vdev_id);
3740
3741 if (mgt_type == IEEE80211_FC0_TYPE_MGT &&
3742 (mgt_subtype == MGMT_SUBTYPE_BEACON ||
3743 mgt_subtype == MGMT_SUBTYPE_PROBE_RESP)) {
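/*
 * A beacon/probe response must carry more than the 802.11 header plus
 * the fixed fields (timestamp, beacon interval, capability) that
 * precede the IEs; drop anything shorter.
 */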
3744 if (mgmt_rx_params->buf_len <=
3745 (sizeof(struct ieee80211_frame) +
3746 offsetof(struct wlan_bcn_frame, ie))) {
3747 wma_debug("Dropping frame from "QDF_MAC_ADDR_FMT,
3748 QDF_MAC_ADDR_REF(wh->i_addr3));
3749 cds_pkt_return_packet(rx_pkt);
3750 return -EINVAL;
3751 }
3752 }
3753
3754 if (wma_is_pkt_drop_candidate(wma_handle, wh->i_addr2, wh->i_addr3,
3755 mgt_subtype)) {
3756 cds_pkt_return_packet(rx_pkt);
3757 return -EINVAL;
3758 }
3759 wma_mgmt_pktdump_rx_handler(mgmt_rx_params, rx_pkt,
3760 wma_handle, mgt_type, mgt_subtype);
3761
3762 return 0;
3763 }
3764
3765 /**
3766 * wma_mem_endianness_based_copy() - does memory copy from src to dst
3767 * @dst: destination address
3768 * @src: source address
3769 * @size: size to be copied
3770 *
3771 * This function copies the memory of size passed from source
3772 * address to destination address.
3773 *
3774 * Return: Nothing
3775 */
3776 #ifdef BIG_ENDIAN_HOST
3777 static void wma_mem_endianness_based_copy(
3778 uint8_t *dst, uint8_t *src, uint32_t size)
3779 {
3780 /*
3781 * For big endian host, copy engine byte_swap is enabled
3782 * But the rx mgmt frame buffer content is in network byte order
3783 * Need to byte swap the mgmt frame buffer content - so when
3784 * copy engine does byte_swap - host gets buffer content in the
3785 * correct byte order.
3786 */
3787
3788 uint32_t i;
3789 uint32_t *destp, *srcp;
3790
3791 destp = (uint32_t *) dst;
3792 srcp = (uint32_t *) src;
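/* Byte swap one 32-bit word at a time; roundup() includes a partial tail */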
3793 for (i = 0; i < (roundup(size, sizeof(uint32_t)) / 4); i++) {
3794 *destp = cpu_to_le32(*srcp);
3795 destp++;
3796 srcp++;
3797 }
3798 }
3799 #else
3800 static void wma_mem_endianness_based_copy(
3801 uint8_t *dst, uint8_t *src, uint32_t size)
3802 {
3803 qdf_mem_copy(dst, src, size);
3804 }
3805 #endif
3806
3807 #define RESERVE_BYTES 100
3808 /**
3809 * wma_mgmt_rx_process() - process management rx frame.
3810 * @handle: wma handle
3811 * @data: rx data
3812 * @data_len: data length
3813 *
3814 * Return: 0 for success or error code
3815 */
3816 static int wma_mgmt_rx_process(void *handle, uint8_t *data,
3817 uint32_t data_len)
3818 {
3819 tp_wma_handle wma_handle = (tp_wma_handle) handle;
3820 struct mgmt_rx_event_params *mgmt_rx_params;
3821 struct wlan_objmgr_psoc *psoc;
3822 uint8_t *bufp;
3823 qdf_nbuf_t wbuf;
3824 QDF_STATUS status;
3825
3826 if (wma_validate_handle(wma_handle))
3827 return -EINVAL;
3828
3829 mgmt_rx_params = qdf_mem_malloc(sizeof(*mgmt_rx_params));
3830 if (!mgmt_rx_params) {
3831 return -ENOMEM;
3832 }
3833
3834 if (wmi_extract_mgmt_rx_params(wma_handle->wmi_handle,
3835 data, mgmt_rx_params, &bufp) != QDF_STATUS_SUCCESS) {
3836 wma_err_rl("Extraction of mgmt rx params failed");
3837 qdf_mem_free(mgmt_rx_params);
3838 return -EINVAL;
3839 }
3840
3841 if (mgmt_rx_params->buf_len > data_len ||
3842 !mgmt_rx_params->buf_len ||
3843 !bufp) {
3844 wma_err_rl("Invalid data_len %u, buf_len %u bufp %pK",
3845 data_len, mgmt_rx_params->buf_len, bufp);
3846 qdf_mem_free(mgmt_rx_params);
3847 return -EINVAL;
3848 }
3849
3850 if (!mgmt_rx_params->chan_freq) {
3851 /*
3852 * A zero channel frequency indicates a legacy FW that operates on
3853 * channel numbers. It also implies that BAND_6G is not supported,
3854 * since BAND_6G works only on frequencies; below 6 GHz, channel
3855 * numbers can be treated as unique.
3856 */
3857 mgmt_rx_params->chan_freq = wlan_reg_legacy_chan_to_freq(
3858 wma_handle->pdev,
3859 mgmt_rx_params->channel);
3860 }
3861
3862 mgmt_rx_params->pdev_id = 0;
3863 mgmt_rx_params->rx_params = NULL;
3864
3865 /*
3866 * Allocate the memory for this rx packet and add an extra 100 bytes for:
3867 *
3868 * 1. Filling in the RSN capabilities left out by some APs, which set the
3869 * RSN IE length 2 bytes too long but don't fill the IE data with the
3870 * capabilities, resulting in a failure in the unpack core due to the
3871 * length mismatch. Check sir_validate_and_rectify_ies for more info.
3872 *
3873 * 2. In the API wma_process_rmf_frame(), the driver trims the CCMP
3874 * header by overwriting the IEEE header onto the memory occupied by the
3875 * CCMP header; an overflow is possible if the memory allocated to the
3876 * frame is less than sizeof(struct ieee80211_frame) + the CCMP
3877 * header length, so allocating 100 extra bytes solves this issue too.
3878 *
3879 * 3. The CCMP header pointer is set to orig_hdr +
3880 * sizeof(struct ieee80211_frame), which could also result in an OOB
3881 * access if the data length is less than
3882 * sizeof(struct ieee80211_frame); allocating extra bytes solves this
3883 * issue too.
3884 */
3885 wbuf = qdf_nbuf_alloc(NULL, roundup(mgmt_rx_params->buf_len +
3886 RESERVE_BYTES,
3887 4), 0, 4, false);
3888 if (!wbuf) {
3889 qdf_mem_free(mgmt_rx_params);
3890 return -ENOMEM;
3891 }
3892
3893 qdf_nbuf_put_tail(wbuf, mgmt_rx_params->buf_len);
3894 qdf_nbuf_set_protocol(wbuf, ETH_P_CONTROL);
3895
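/* Zero the reserved/rounded-up tail beyond the received frame bytes */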
3896 qdf_mem_zero(((uint8_t *)qdf_nbuf_data(wbuf) + mgmt_rx_params->buf_len),
3897 (roundup(mgmt_rx_params->buf_len + RESERVE_BYTES, 4) -
3898 mgmt_rx_params->buf_len));
3899
3900 wma_mem_endianness_based_copy(qdf_nbuf_data(wbuf),
3901 bufp, mgmt_rx_params->buf_len);
3902
3903 psoc = (struct wlan_objmgr_psoc *)
3904 wma_handle->psoc;
3905 if (!psoc) {
3906 wma_err("psoc ctx is NULL");
3907 qdf_nbuf_free(wbuf);
3908 qdf_mem_free(mgmt_rx_params);
3909 return -EINVAL;
3910 }
3911
3912 status = mgmt_txrx_rx_handler(psoc, wbuf, mgmt_rx_params);
3913 if (status != QDF_STATUS_SUCCESS) {
3914 qdf_mem_free(mgmt_rx_params);
3915 return -EINVAL;
3916 }
3917
3918 qdf_mem_free(mgmt_rx_params);
3919 return 0;
3920 }
3921
3922 /**
3923 * wma_de_register_mgmt_frm_client() - deregister the management frame RX handler
3924 *
3925 * This function deregisters the event handler registered for
3926 * WMI_MGMT_RX_EVENTID.
3927 *
3928 * Return: QDF status
3929 */
3930 QDF_STATUS wma_de_register_mgmt_frm_client(void)
3931 {
3932 tp_wma_handle wma_handle = (tp_wma_handle)
3933 cds_get_context(QDF_MODULE_ID_WMA);
3934
3935 if (!wma_handle)
3936 return QDF_STATUS_E_NULL_VALUE;
3937
3938 #ifdef QCA_WIFI_FTM
3939 if (cds_get_conparam() == QDF_GLOBAL_FTM_MODE)
3940 return QDF_STATUS_SUCCESS;
3941 #endif
3942
3943 if (wmi_unified_unregister_event_handler(wma_handle->wmi_handle,
3944 wmi_mgmt_rx_event_id) != 0) {
3945 wma_err("Failed to Unregister rx mgmt handler with wmi");
3946 return QDF_STATUS_E_FAILURE;
3947 }
3948 return QDF_STATUS_SUCCESS;
3949 }
3950
3951 #ifdef WLAN_FEATURE_ROAM_OFFLOAD
3952 /**
3953 * wma_register_roaming_callbacks() - Register roaming callbacks
3954 * @csr_roam_auth_event_handle_cb: CSR callback routine pointer
3955 * @pe_roam_synch_cb: PE roam synch callback routine pointer
 * @pe_disconnect_cb: PE disconnect notification callback routine pointer
 * @pe_roam_set_ie_cb: PE callback routine pointer to set IEs for roam invoke
3956 *
3957 * Register the SME and PE callback routines with WMA for
3958 * handling roaming
3959 *
3960 * Return: Success or Failure Status
3961 */
3962 QDF_STATUS wma_register_roaming_callbacks(
3963 QDF_STATUS (*csr_roam_auth_event_handle_cb)(struct mac_context *mac,
3964 uint8_t vdev_id,
3965 struct qdf_mac_addr bssid,
3966 uint32_t akm),
3967 pe_roam_synch_fn_t pe_roam_synch_cb,
3968 QDF_STATUS (*pe_disconnect_cb) (struct mac_context *mac,
3969 uint8_t vdev_id,
3970 uint8_t *deauth_disassoc_frame,
3971 uint16_t deauth_disassoc_frame_len,
3972 uint16_t reason_code),
3973 set_ies_fn_t pe_roam_set_ie_cb)
3974 {
3975
3976 tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
3977
3978 if (!wma)
3979 return QDF_STATUS_E_FAILURE;
3980
3981 wma->csr_roam_auth_event_handle_cb = csr_roam_auth_event_handle_cb;
3982 wma->pe_roam_synch_cb = pe_roam_synch_cb;
3983 wma->pe_disconnect_cb = pe_disconnect_cb;
3984 wma->pe_roam_set_ie_cb = pe_roam_set_ie_cb;
3985 wma_debug("Registered roam synch callbacks with WMA successfully");
3986
3987 return QDF_STATUS_SUCCESS;
3988 }
3989 #endif
3990
3991 /**
3992 * wma_register_mgmt_frm_client() - register management frame callback
3993 *
3994 * This function registers event handler for WMI_MGMT_RX_EVENTID.
3995 *
3996 * Return: QDF status
3997 */
3998 QDF_STATUS wma_register_mgmt_frm_client(void)
3999 {
4000 tp_wma_handle wma_handle = (tp_wma_handle)
4001 cds_get_context(QDF_MODULE_ID_WMA);
4002
4003 if (!wma_handle)
4004 return QDF_STATUS_E_NULL_VALUE;
4005
4006 if (wmi_unified_register_event_handler(wma_handle->wmi_handle,
4007 wmi_mgmt_rx_event_id,
4008 wma_mgmt_rx_process,
4009 WMA_RX_WORK_CTX) != 0) {
4010 wma_err("Failed to register rx mgmt handler with wmi");
4011 return QDF_STATUS_E_FAILURE;
4012 }
4013
4014 return QDF_STATUS_SUCCESS;
4015 }
4016
4017 /**
4018 * wma_register_packetdump_callback() - store tx and rx mgmt packet dump
4019 * callback handlers
4020 * @tx_cb: tx mgmt packetdump cb
4021 * @rx_cb: rx mgmt packetdump cb
4022 *
4023 * This function is used to store the tx and rx mgmt packet dump callbacks.
4024 *
4025 * Return: None
4026 *
4027 */
4028 void wma_register_packetdump_callback(
4029 ol_txrx_pktdump_cb tx_cb,
4030 ol_txrx_pktdump_cb rx_cb)
4031 {
4032 tp_wma_handle wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
4033
4034 if (!wma_handle)
4035 return;
4036
4037 wma_handle->wma_mgmt_tx_packetdump_cb = tx_cb;
4038 wma_handle->wma_mgmt_rx_packetdump_cb = rx_cb;
4039 }
4040
4041 /**
4042 * wma_deregister_packetdump_callback() - remove tx and rx mgmt packet dump
4043 * callback handlers
4044 *
4045 * This function is used to remove the tx and rx mgmt packet dump callbacks.
4046 *
4047 * Return: None
4048 *
4049 */
4050 void wma_deregister_packetdump_callback(void)
4051 {
4052 tp_wma_handle wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
4053
4054 if (!wma_handle)
4055 return;
4056
4057 wma_handle->wma_mgmt_tx_packetdump_cb = NULL;
4058 wma_handle->wma_mgmt_rx_packetdump_cb = NULL;
4059 }
4060
4061 QDF_STATUS wma_mgmt_unified_cmd_send(struct wlan_objmgr_vdev *vdev,
4062 qdf_nbuf_t buf, uint32_t desc_id,
4063 void *mgmt_tx_params)
4064 {
4065 tp_wma_handle wma_handle;
4066 int ret;
4067 QDF_STATUS status = QDF_STATUS_E_INVAL;
4068 struct wmi_mgmt_params *mgmt_params =
4069 (struct wmi_mgmt_params *)mgmt_tx_params;
4070 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
4071
4072 if (!mgmt_params) {
4073 wma_err("mgmt_params ptr passed is NULL");
4074 return QDF_STATUS_E_INVAL;
4075 }
4076 mgmt_params->desc_id = desc_id;
4077
4078 if (!vdev) {
4079 wma_err("vdev ptr passed is NULL");
4080 return QDF_STATUS_E_INVAL;
4081 }
4082
4083 wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
4084 if (!wma_handle)
4085 return QDF_STATUS_E_INVAL;
4086
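/*
 * Prefer the WMI mgmt TX path when the firmware advertises
 * wmi_service_mgmt_tx_wmi; otherwise fall back to sending the frame
 * over the data path via cdp_mgmt_send_ext().
 */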
4087 if (wmi_service_enabled(wma_handle->wmi_handle,
4088 wmi_service_mgmt_tx_wmi)) {
4089 status = wmi_mgmt_unified_cmd_send(wma_handle->wmi_handle,
4090 mgmt_params);
4091 } else {
4092 QDF_NBUF_CB_MGMT_TXRX_DESC_ID(buf)
4093 = mgmt_params->desc_id;
4094
4095 ret = cdp_mgmt_send_ext(soc, mgmt_params->vdev_id, buf,
4096 mgmt_params->tx_type,
4097 mgmt_params->use_6mbps,
4098 mgmt_params->chanfreq);
4099 status = qdf_status_from_os_return(ret);
4100 }
4101
4102 if (status != QDF_STATUS_SUCCESS) {
4103 wma_err("mgmt tx failed");
4104 return status;
4105 }
4106
4107 return QDF_STATUS_SUCCESS;
4108 }
4109
4110 #ifndef CONFIG_HL_SUPPORT
4111 void wma_mgmt_nbuf_unmap_cb(struct wlan_objmgr_pdev *pdev,
4112 qdf_nbuf_t buf)
4113 {
4114 struct wlan_objmgr_psoc *psoc;
4115 qdf_device_t dev;
4116
4117 if (!buf)
4118 return;
4119
4120 psoc = wlan_pdev_get_psoc(pdev);
4121 if (!psoc) {
4122 wma_err("Psoc handle NULL");
4123 return;
4124 }
4125
4126 dev = wlan_psoc_get_qdf_dev(psoc);
4127 qdf_nbuf_unmap_single(dev, buf, QDF_DMA_TO_DEVICE);
4128 }
4129
4130 QDF_STATUS wma_mgmt_frame_fill_peer_cb(struct wlan_objmgr_peer *peer,
4131 qdf_nbuf_t buf)
4132 {
4133 struct wlan_objmgr_psoc *psoc;
4134 struct wlan_objmgr_pdev *pdev;
4135
4136 psoc = wlan_peer_get_psoc(peer);
4137 if (!psoc) {
4138 wma_err("Psoc handle NULL");
4139 return QDF_STATUS_E_INVAL;
4140 }
4141
4142 pdev = wlan_objmgr_get_pdev_by_id((struct wlan_objmgr_psoc *)psoc,
4143 wlan_peer_get_pdev_id(peer),
4144 WLAN_LEGACY_WMA_ID);
4145 if (!pdev) {
4146 wma_err("Pdev handle NULL");
4147 return QDF_STATUS_E_INVAL;
4148 }
4149 wma_mgmt_nbuf_unmap_cb(pdev, buf);
4150 wlan_objmgr_pdev_release_ref(pdev, WLAN_LEGACY_WMA_ID);
4151
4152 return QDF_STATUS_SUCCESS;
4153 }
4154
4155 QDF_STATUS
4156 wma_update_edca_pifs_param(WMA_HANDLE handle,
4157 struct edca_pifs_vparam *edca_pifs_param)
4158 {
4159 tp_wma_handle wma_handle = (tp_wma_handle) handle;
4160 QDF_STATUS status;
4161
4162 status = wmi_unified_update_edca_pifs_param(wma_handle->wmi_handle,
4163 edca_pifs_param);
4164
4165 if (QDF_IS_STATUS_ERROR(status))
4166 wma_err("Failed to set EDCA/PIFS Parameters");
4167
4168 return status;
4169 }
4170 #endif
4171
4172 QDF_STATUS
4173 wma_update_bss_peer_phy_mode(struct wlan_channel *des_chan,
4174 struct wlan_objmgr_vdev *vdev)
4175 {
4176 struct wlan_objmgr_peer *bss_peer;
4177 enum wlan_phymode old_peer_phymode, new_phymode;
4178 tSirNwType nw_type;
4179 struct vdev_mlme_obj *mlme_obj;
4180
4181 bss_peer = wlan_objmgr_vdev_try_get_bsspeer(vdev, WLAN_LEGACY_WMA_ID);
4182 if (!bss_peer) {
4183 wma_err("not able to find bss peer for vdev %d",
4184 wlan_vdev_get_id(vdev));
4185 return QDF_STATUS_E_INVAL;
4186 }
4187
4188 old_peer_phymode = wlan_peer_get_phymode(bss_peer);
4189
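/*
 * Derive the base network type from the target channel: 2.4 GHz maps
 * to 11b or 11g depending on the phymode, any other band maps to 11a;
 * HT/VHT/HE/EHT attributes are carried over from the old peer phymode.
 */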
4190 if (WLAN_REG_IS_24GHZ_CH_FREQ(des_chan->ch_freq)) {
4191 if (des_chan->ch_phymode == WLAN_PHYMODE_11B ||
4192 old_peer_phymode == WLAN_PHYMODE_11B)
4193 nw_type = eSIR_11B_NW_TYPE;
4194 else
4195 nw_type = eSIR_11G_NW_TYPE;
4196 } else {
4197 nw_type = eSIR_11A_NW_TYPE;
4198 }
4199
4200 new_phymode = wma_peer_phymode(nw_type, STA_ENTRY_PEER,
4201 IS_WLAN_PHYMODE_HT(old_peer_phymode),
4202 des_chan->ch_width,
4203 IS_WLAN_PHYMODE_VHT(old_peer_phymode),
4204 IS_WLAN_PHYMODE_HE(old_peer_phymode),
4205 wma_is_phymode_eht(old_peer_phymode));
4206
4207 if (new_phymode == old_peer_phymode) {
4208 wma_debug("Ignore update, old %d and new %d phymode are same, vdev_id : %d",
4209 old_peer_phymode, new_phymode,
4210 wlan_vdev_get_id(vdev));
4211 wlan_objmgr_peer_release_ref(bss_peer, WLAN_LEGACY_WMA_ID);
4212 return QDF_STATUS_SUCCESS;
4213 }
4214
4215 mlme_obj = wlan_vdev_mlme_get_cmpt_obj(vdev);
4216 if (!mlme_obj) {
4217 wma_err("not able to get mlme_obj");
4218 wlan_objmgr_peer_release_ref(bss_peer, WLAN_LEGACY_WMA_ID);
4219 return QDF_STATUS_E_INVAL;
4220 }
4221
4222 wlan_peer_obj_lock(bss_peer);
4223 wlan_peer_set_phymode(bss_peer, new_phymode);
4224 wlan_peer_obj_unlock(bss_peer);
4225
4226 wlan_objmgr_peer_release_ref(bss_peer, WLAN_LEGACY_WMA_ID);
4227
4228 mlme_obj->mgmt.generic.phy_mode = wmi_host_to_fw_phymode(new_phymode);
4229 des_chan->ch_phymode = new_phymode;
4230
4231 return QDF_STATUS_SUCCESS;
4232 }
4233
4234 QDF_STATUS
4235 cm_send_ies_for_roam_invoke(struct wlan_objmgr_vdev *vdev, uint16_t dot11_mode)
4236 {
4237 tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
4238 enum QDF_OPMODE op_mode;
4239 QDF_STATUS status;
4240 uint8_t vdev_id;
4241
4242 if (!wma)
4243 return QDF_STATUS_E_FAILURE;
4244
4245 vdev_id = wlan_vdev_get_id(vdev);
4246 op_mode = wlan_vdev_mlme_get_opmode(vdev);
4247
4248 status = wma->pe_roam_set_ie_cb(wma->mac_context, vdev_id, dot11_mode,
4249 op_mode);
4250 return status;
4251 }
4252