1 /*
2 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
3 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17 #include "dp_types.h"
18 #include "qdf_nbuf.h"
19 #include "dp_internal.h"
20 #include "qdf_mem.h" /* qdf_mem_malloc,free */
21 #include <dp_be.h>
22 #include <qdf_nbuf_frag.h>
23 #include <hal_be_api_mon.h>
24 #include <dp_mon.h>
25 #include <dp_tx_mon_2.0.h>
26 #include <dp_mon_2.0.h>
27 #ifdef QCA_SUPPORT_LITE_MONITOR
28 #include <dp_lite_mon.h>
29 #endif
30
31 #define MAX_PPDU_INFO_LIST_DEPTH 64
32
33 #if defined(WLAN_TX_PKT_CAPTURE_ENH_BE) || defined(WLAN_PKT_CAPTURE_TX_2_0) ||\
34 defined(WLAN_TX_MON_CORE_DEBUG)
35 void
36 dp_tx_mon_status_free_packet_buf(struct dp_pdev *pdev,
37 qdf_frag_t status_frag, uint32_t end_offset,
38 struct dp_tx_mon_desc_list *mon_desc_list_ref)
39 {
40 struct dp_mon_pdev *mon_pdev;
41 struct dp_mon_pdev_be *mon_pdev_be;
42 struct dp_pdev_tx_monitor_be *tx_mon_be;
43 struct hal_mon_packet_info packet_info = {0};
44 uint8_t *tx_tlv;
45 uint8_t *mon_buf_tx_tlv;
46 uint8_t *tx_tlv_start;
47
48 if (qdf_unlikely(!pdev))
49 return;
50
51 mon_pdev = pdev->monitor_pdev;
52 if (qdf_unlikely(!mon_pdev))
53 return;
54
55 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
56 if (qdf_unlikely(!mon_pdev_be))
57 return;
58
59 tx_mon_be = &mon_pdev_be->tx_monitor_be;
60 tx_tlv = status_frag;
61 tx_tlv_start = tx_tlv;
62 /*
63 * parse each status buffer and find packet buffer in it
64 */
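	/*
	 * Note: a TX monitor buffer-address TLV carries the sw_cookie of the
	 * dp_mon_desc that owns the payload fragment; since this path drops
	 * the PPDU, the fragment is unmapped/freed and the descriptor is
	 * returned to the free list below.
	 */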
65 do {
66 if (hal_txmon_is_mon_buf_addr_tlv(pdev->soc->hal_soc, tx_tlv)) {
67 struct dp_mon_desc *mon_desc = NULL;
68 qdf_frag_t packet_buffer = NULL;
69
70 mon_buf_tx_tlv = ((uint8_t *)tx_tlv +
71 HAL_RX_TLV64_HDR_SIZE);
72 hal_txmon_populate_packet_info(pdev->soc->hal_soc,
73 mon_buf_tx_tlv,
74 &packet_info);
75
76 mon_desc = (struct dp_mon_desc *)(uintptr_t)packet_info.sw_cookie;
77
78 qdf_assert_always(mon_desc);
79
80 if (mon_desc->magic != DP_MON_DESC_MAGIC)
81 qdf_assert_always(0);
82
83 if (!mon_desc->unmapped) {
84 qdf_mem_unmap_page(pdev->soc->osdev,
85 (qdf_dma_addr_t)mon_desc->paddr,
86 DP_MON_DATA_BUFFER_SIZE,
87 QDF_DMA_FROM_DEVICE);
88 mon_desc->unmapped = 1;
89 }
90
91 packet_buffer = (qdf_frag_t)(mon_desc->buf_addr);
92 mon_desc->buf_addr = NULL;
93
94 qdf_assert_always(packet_buffer);
95 /* increment reap count */
96 mon_desc_list_ref->tx_mon_reap_cnt++;
97
98 /* add the mon_desc to free list */
99 dp_mon_add_to_free_desc_list(&mon_desc_list_ref->desc_list,
100 &mon_desc_list_ref->tail,
101 mon_desc);
102
103 tx_mon_be->stats.pkt_buf_recv++;
104 tx_mon_be->stats.pkt_buf_free++;
105
106 /* free buffer, mapped to descriptor */
107 qdf_frag_free(packet_buffer);
108 }
109
110 /* need api definition for hal_tx_status_get_next_tlv */
111 tx_tlv = hal_tx_status_get_next_tlv(tx_tlv,
112 mon_pdev->is_tlv_hdr_64_bit);
113 } while ((tx_tlv - tx_tlv_start) < end_offset);
114 }
115 #endif
116
117 #if defined(WLAN_TX_PKT_CAPTURE_ENH_BE) && defined(WLAN_PKT_CAPTURE_TX_2_0)
118 /**
119 * dp_tx_mon_status_queue_free() - API to free status buffer
120 * @pdev: pdev Handle
121 * @tx_mon_be: pointer to tx_monitor_be
122 * @mon_desc_list_ref: tx monitor descriptor list reference
123 *
124 * Return: void
125 */
126 static void
127 dp_tx_mon_status_queue_free(struct dp_pdev *pdev,
128 struct dp_pdev_tx_monitor_be *tx_mon_be,
129 struct dp_tx_mon_desc_list *mon_desc_list_ref)
130 {
131 uint8_t last_frag_q_idx = tx_mon_be->last_frag_q_idx;
132 qdf_frag_t status_frag = NULL;
133 uint8_t i = tx_mon_be->cur_frag_q_idx;
134 uint32_t end_offset = 0;
135
136 if (last_frag_q_idx > MAX_STATUS_BUFFER_IN_PPDU)
137 last_frag_q_idx = MAX_STATUS_BUFFER_IN_PPDU;
138
139 for (; i < last_frag_q_idx; i++) {
140 status_frag = tx_mon_be->frag_q_vec[i].frag_buf;
141
142 if (qdf_unlikely(!status_frag))
143 continue;
144
145 end_offset = tx_mon_be->frag_q_vec[i].end_offset;
146 dp_tx_mon_status_free_packet_buf(pdev, status_frag, end_offset,
147 mon_desc_list_ref);
148 tx_mon_be->stats.status_buf_free++;
149 qdf_frag_free(status_frag);
150 tx_mon_be->frag_q_vec[i].frag_buf = NULL;
151 tx_mon_be->frag_q_vec[i].end_offset = 0;
152 }
153 tx_mon_be->last_frag_q_idx = 0;
154 tx_mon_be->cur_frag_q_idx = 0;
155 }
156
157 /**
158  * dp_tx_mon_enqueue_mpdu_nbuf() - API to enqueue an mpdu nbuf to the per user mpdu queue
159 * @pdev: pdev Handle
160 * @tx_ppdu_info: pointer to tx ppdu info structure
161 * @user_id: user index
162  * @mpdu_nbuf: nbuf to be enqueued
163 *
164 * Return: void
165 */
166 static void
167 dp_tx_mon_enqueue_mpdu_nbuf(struct dp_pdev *pdev,
168 struct dp_tx_ppdu_info *tx_ppdu_info,
169 uint8_t user_id, qdf_nbuf_t mpdu_nbuf)
170 {
171 qdf_nbuf_t radiotap = NULL;
172 /* enqueue mpdu_nbuf to the per user mpdu_q */
173 qdf_nbuf_queue_t *usr_mpdu_q = NULL;
174
175 if (!TXMON_PPDU_HAL(tx_ppdu_info, num_users))
176 QDF_BUG(0);
177
178 usr_mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, user_id, mpdu_q);
179
180 radiotap = qdf_nbuf_alloc(pdev->soc->osdev, MAX_MONITOR_HEADER,
181 MAX_MONITOR_HEADER,
182 4, FALSE);
183 if (qdf_unlikely(!radiotap)) {
184 qdf_err("Unable to allocate radiotap buffer\n");
185 qdf_nbuf_free(mpdu_nbuf);
186 return;
187 }
188
189 	/* chain the mpdu nbuf to the radiotap nbuf as an ext (frag) list */
190 qdf_nbuf_append_ext_list(radiotap, mpdu_nbuf, qdf_nbuf_len(mpdu_nbuf));
191 qdf_nbuf_queue_add(usr_mpdu_q, radiotap);
192 }
193
194 /*
195 * TX MONITOR
196 *
197 * frame format
198 * -------------------------------------------------------------------------
199 * FUNC | ToDS | FromDS | ADDRESS 1 | ADDRESS 2 | ADDRESS 3 | ADDRESS 4 |
200 * ------------------------------------------------------------------------
201 * IBSS | 0 | 0 | DA | SA | BSSID | NOT USED |
202 * TO AP | 1 | 0 | BSSID | SA | DA | NOT USED |
203 * From AP| 0 | 1 | DA | BSSID | SA | NOT USED |
204 * WDS | 1 | 1 | RA | TA | DA | SA |
205 * ------------------------------------------------------------------------
206 *
207 * HOST GENERATED FRAME:
208 * =====================
209 * 1. RTS
210 * 2. CTS
211 * 3. ACK
212 * 4. BA
213 * 5. Multi STA BA
214 *
215 * control frame
216 * ------------------------------------------------------------
217 * | protocol 2b | Type 2b | subtype 4b | ToDS 1b | FromDS 1b |
218 * | Morefrag 1b | Retry 1b | pwr_mgmt 1b | More data 1b |
219 * | protected frm 1b | order 1b |
220 * -----------------------------------------------------------
221  * control frames originate from a wireless station, so ToDS = FromDS = 0
222 *
223 * RTS
224 * ---------------------------------------------------------------------------
225 * | FrameCtl 2 | Duration 2 | Receiver Address 6 | Transmit address 6 | FCS |
226 * ---------------------------------------------------------------------------
227 * subtype in FC is RTS - 1101
228 * type in FC is control frame - 10
229 *
230 * CTS
231 * --------------------------------------------------------
232 * | FrameCtl 2 | Duration 2 | Receiver Address 6 | FCS 4 |
233 * --------------------------------------------------------
234 * subtype in FC is CTS - 0011
235 * type in FC is control frame - 10
236 *
237 * ACK
238 * --------------------------------------------------------
239 * | FrameCtl 2 | Duration 2 | Receiver Address 6 | FCS 4 |
240 * --------------------------------------------------------
241 * subtype in FC is ACK - 1011
242 * type in FC is control frame - 10
243 *
244 * Block ACK
245 * --------------------------------------------------------------------------
246 * | FC 2 | Dur 2 | RA 6 | TA 6 | BA CTRL 2 | BA Information variable | FCS |
247 * --------------------------------------------------------------------------
248 *
249 * Block Ack control
250 * ---------------------------------------------------------------
251 * | BA ACK POLICY B0 | BA TYPE B1-B4 | Rsv B5-B11 | TID B12-B15 |
252 * ---------------------------------------------------------------
253 *
254 * BA ack policy
255 * 0 - Normal Ack
256 * 1 - No Ack
257 *
258 * Block Ack Type
259 * 0 - Reserved
260 * 1 - extended compressed
261 * 2 - compressed
262 * 3 - Multi TID
263 * 4-5 - Reserved
264 * 6 - GCR
265 * 7-9 - Reserved
266 * 10 - GLK-GCR
267 * 11 - Multi-STA
268 * 12-15 - Reserved
269 *
270 * Block Ack information
271 * ----------------------------------------------------------
272 * | Block ack start seq ctrl 2 | Block ack bitmap variable |
273 * ----------------------------------------------------------
274 *
275 * Multi STA Block Ack Information
276 * -----------------------------------------------------------------
277 * | Per STA TID info 2 | BA start seq ctrl 2 | BA bitmap variable |
278 * -----------------------------------------------------------------
279 *
280 * Per STA TID info
281 * ------------------------------------
282 * | AID11 11b | Ack Type 1b | TID 4b |
283 * ------------------------------------
284 * AID11 - 2045 means unassociated STA, then ACK Type and TID 0, 15
285 *
286 * Mgmt/PS-POLL frame ack
287 * Ack type - 1 and TID - 15, BA_seq_ctrl & BA_bitmap - not present
288 *
289 * All ack context - with no bitmap (all AMPDU success)
290 * Ack type - 1 and TID - 14, BA_seq_ctrl & BA_bitmap - not present
291 *
292 * Block ack context
293 * Ack type - 0 and TID - 0~7 BA_seq_ctrl & BA_bitmap - present
294 *
295 * Ack context
296 * Ack type - 1 and TID - 0~7 BA_seq_ctrl & BA_bitmap - not present
297 *
298 *
299 */
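/*
 * The helpers below regenerate such host/self-generated frames from the HAL
 * TX status info and enqueue them to the per user mpdu queue, so the monitor
 * capture contains them even though they are not delivered as payload buffers.
 */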
300
301 /**
302 * dp_tx_mon_generate_cts2self_frm() - API to generate cts2self frame
303 * @pdev: pdev Handle
304 * @tx_ppdu_info: pointer to tx ppdu info structure
305 * @window_flag: frame generated window
306 *
307 * Return: void
308 */
309 static void
310 dp_tx_mon_generate_cts2self_frm(struct dp_pdev *pdev,
311 struct dp_tx_ppdu_info *tx_ppdu_info,
312 uint8_t window_flag)
313 {
314 /* allocate and populate CTS/ CTS2SELF frame */
315 /* enqueue 802.11 payload to per user mpdu_q */
316 struct dp_mon_pdev *mon_pdev;
317 struct dp_mon_pdev_be *mon_pdev_be;
318 struct dp_pdev_tx_monitor_be *tx_mon_be;
319 struct hal_tx_status_info *tx_status_info;
320 uint16_t duration_le = 0;
321 struct ieee80211_frame_min_one *wh_min = NULL;
322 qdf_nbuf_t mpdu_nbuf = NULL;
323 uint8_t frm_ctl;
324
325 /* sanity check */
326 if (qdf_unlikely(!pdev))
327 return;
328
329 mon_pdev = pdev->monitor_pdev;
330 if (qdf_unlikely(!mon_pdev))
331 return;
332
333 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
334 if (qdf_unlikely(!mon_pdev_be))
335 return;
336
337 tx_mon_be = &mon_pdev_be->tx_monitor_be;
338
339 if (window_flag == INITIATOR_WINDOW)
340 tx_status_info = &tx_mon_be->prot_status_info;
341 else
342 tx_status_info = &tx_mon_be->data_status_info;
343
344 /*
345 * for radiotap we allocate new skb,
346 	 * so we don't need to reserve skb headroom
347 */
348 mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
349 MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
350 if (!mpdu_nbuf)
351 return;
352
353 wh_min = (struct ieee80211_frame_min_one *)qdf_nbuf_data(mpdu_nbuf);
354 qdf_mem_zero(wh_min, MAX_DUMMY_FRM_BODY);
355
356 frm_ctl = (QDF_IEEE80211_FC0_VERSION_0 | QDF_IEEE80211_FC0_TYPE_CTL |
357 QDF_IEEE80211_FC0_SUBTYPE_CTS);
358 TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
359 TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
360 wh_min->i_fc[1] = 0;
361 wh_min->i_fc[0] = frm_ctl;
362
363 duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info, duration));
364 wh_min->i_dur[1] = (duration_le & 0xFF00) >> 8;
365 wh_min->i_dur[0] = (duration_le & 0xFF);
366
367 if (window_flag == INITIATOR_WINDOW) {
368 qdf_mem_copy(wh_min->i_addr1,
369 TXMON_STATUS_INFO(tx_status_info, addr1),
370 QDF_MAC_ADDR_SIZE);
371 } else {
372 qdf_mem_copy(wh_min->i_addr1,
373 TXMON_STATUS_INFO(tx_status_info, addr2),
374 QDF_MAC_ADDR_SIZE);
375 }
376
377 qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_min));
378 dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);
379 TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
380 }
381
382 /**
383 * dp_tx_mon_generate_rts_frm() - API to generate rts frame
384 * @pdev: pdev Handle
385 * @tx_ppdu_info: pointer to tx ppdu info structure
386 * @window_flag: frame generated window
387 *
388 * Return: void
389 */
390 static void
391 dp_tx_mon_generate_rts_frm(struct dp_pdev *pdev,
392 struct dp_tx_ppdu_info *tx_ppdu_info,
393 uint8_t window_flag)
394 {
395 /* allocate and populate RTS frame */
396 /* enqueue 802.11 payload to per user mpdu_q */
397 struct dp_mon_pdev *mon_pdev;
398 struct dp_mon_pdev_be *mon_pdev_be;
399 struct dp_pdev_tx_monitor_be *tx_mon_be;
400 struct hal_tx_status_info *tx_status_info;
401 uint16_t duration_le = 0;
402 struct ieee80211_ctlframe_addr2 *wh_min = NULL;
403 qdf_nbuf_t mpdu_nbuf = NULL;
404 uint8_t frm_ctl;
405
406 /* sanity check */
407 if (qdf_unlikely(!pdev))
408 return;
409
410 mon_pdev = pdev->monitor_pdev;
411 if (qdf_unlikely(!mon_pdev))
412 return;
413
414 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
415 if (qdf_unlikely(!mon_pdev_be))
416 return;
417
418 tx_mon_be = &mon_pdev_be->tx_monitor_be;
419 tx_status_info = &tx_mon_be->prot_status_info;
420 /*
421 * for radiotap we allocate new skb,
422 	 * so we don't need to reserve skb headroom
423 */
424 mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
425 MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
426 if (!mpdu_nbuf)
427 return;
428
429 wh_min = (struct ieee80211_ctlframe_addr2 *)qdf_nbuf_data(mpdu_nbuf);
430 qdf_mem_zero(wh_min, MAX_DUMMY_FRM_BODY);
431
432 frm_ctl = (QDF_IEEE80211_FC0_VERSION_0 | QDF_IEEE80211_FC0_TYPE_CTL |
433 QDF_IEEE80211_FC0_SUBTYPE_RTS);
434 TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
435 TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
436 wh_min->i_fc[1] = 0;
437 wh_min->i_fc[0] = frm_ctl;
438
439 duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info, duration));
440 wh_min->i_aidordur[1] = (duration_le & 0xFF00) >> 8;
441 wh_min->i_aidordur[0] = (duration_le & 0xFF);
442
443 if (!tx_status_info->protection_addr)
444 tx_status_info = &tx_mon_be->data_status_info;
445
446 if (window_flag == INITIATOR_WINDOW) {
447 qdf_mem_copy(wh_min->i_addr1,
448 TXMON_STATUS_INFO(tx_status_info, addr1),
449 QDF_MAC_ADDR_SIZE);
450 qdf_mem_copy(wh_min->i_addr2,
451 TXMON_STATUS_INFO(tx_status_info, addr2),
452 QDF_MAC_ADDR_SIZE);
453 } else {
454 qdf_mem_copy(wh_min->i_addr1,
455 TXMON_STATUS_INFO(tx_status_info, addr2),
456 QDF_MAC_ADDR_SIZE);
457 qdf_mem_copy(wh_min->i_addr2,
458 TXMON_STATUS_INFO(tx_status_info, addr1),
459 QDF_MAC_ADDR_SIZE);
460 }
461
462 qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_min));
463 dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);
464 TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
465 }
466
467 /**
468 * dp_tx_mon_generate_ack_frm() - API to generate ack frame
469 * @pdev: pdev Handle
470 * @tx_ppdu_info: pointer to tx ppdu info structure
471 *
472 * Return: void
473 */
474 static void
475 dp_tx_mon_generate_ack_frm(struct dp_pdev *pdev,
476 struct dp_tx_ppdu_info *tx_ppdu_info)
477 {
478 /* allocate and populate ACK frame */
479 /* enqueue 802.11 payload to per user mpdu_q */
480 struct dp_mon_pdev *mon_pdev;
481 struct dp_mon_pdev_be *mon_pdev_be;
482 struct dp_pdev_tx_monitor_be *tx_mon_be;
483 struct hal_tx_status_info *tx_status_info;
484 struct ieee80211_frame_min_one *wh_addr1 = NULL;
485 qdf_nbuf_t mpdu_nbuf = NULL;
486 uint8_t user_id = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
487 uint8_t frm_ctl;
488
489 /* sanity check */
490 if (qdf_unlikely(!pdev))
491 return;
492
493 mon_pdev = pdev->monitor_pdev;
494 if (qdf_unlikely(!mon_pdev))
495 return;
496
497 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
498 if (qdf_unlikely(!mon_pdev_be))
499 return;
500
501 tx_mon_be = &mon_pdev_be->tx_monitor_be;
502 tx_status_info = &tx_mon_be->data_status_info;
503 /*
504 * for radiotap we allocate new skb,
505 	 * so we don't need to reserve skb headroom
506 */
507 mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
508 MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
509 if (!mpdu_nbuf)
510 return;
511
512 wh_addr1 = (struct ieee80211_frame_min_one *)qdf_nbuf_data(mpdu_nbuf);
513
514 frm_ctl = (QDF_IEEE80211_FC0_VERSION_0 | QDF_IEEE80211_FC0_TYPE_CTL |
515 QDF_IEEE80211_FC0_SUBTYPE_ACK);
516 TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
517 TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
518 wh_addr1->i_fc[1] = 0;
519 wh_addr1->i_fc[0] = frm_ctl;
520
521 qdf_mem_copy(wh_addr1->i_addr1,
522 TXMON_STATUS_INFO(tx_status_info, addr1),
523 QDF_MAC_ADDR_SIZE);
524
525 /* set duration zero for ack frame */
526 *(u_int16_t *)(&wh_addr1->i_dur) = qdf_cpu_to_le16(0x0000);
527
528 qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_addr1));
529
530 dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, user_id, mpdu_nbuf);
531 TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
532 }
533
534 /**
535 * dp_tx_mon_generate_3addr_qos_null_frm() - API to generate
536  * 3 address QoS null frame
537 *
538 * @pdev: pdev Handle
539 * @tx_ppdu_info: pointer to tx ppdu info structure
540 *
541 * Return: void
542 */
543 static void
544 dp_tx_mon_generate_3addr_qos_null_frm(struct dp_pdev *pdev,
545 struct dp_tx_ppdu_info *tx_ppdu_info)
546 {
547 /* allocate and populate 3 address qos null frame */
548 /* enqueue 802.11 payload to per user mpdu_q */
549 struct dp_mon_pdev *mon_pdev;
550 struct dp_mon_pdev_be *mon_pdev_be;
551 struct dp_pdev_tx_monitor_be *tx_mon_be;
552 struct hal_tx_status_info *tx_status_info;
553 struct ieee80211_qosframe *wh_addr3 = NULL;
554 qdf_nbuf_t mpdu_nbuf = NULL;
555 uint16_t duration_le = 0;
556 uint8_t num_users = 0;
557 uint8_t frm_ctl;
558
559 /* sanity check */
560 if (qdf_unlikely(!pdev))
561 return;
562
563 mon_pdev = pdev->monitor_pdev;
564 if (qdf_unlikely(!mon_pdev))
565 return;
566
567 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
568 if (qdf_unlikely(!mon_pdev_be))
569 return;
570
571 tx_mon_be = &mon_pdev_be->tx_monitor_be;
572 tx_status_info = &tx_mon_be->data_status_info;
573 /*
574 * for radiotap we allocate new skb,
575 	 * so we don't need to reserve skb headroom
576 */
577 mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
578 MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
579 if (!mpdu_nbuf)
580 return;
581
582 wh_addr3 = (struct ieee80211_qosframe *)qdf_nbuf_data(mpdu_nbuf);
583 qdf_mem_zero(wh_addr3, sizeof(struct ieee80211_qosframe));
584
585 frm_ctl = (QDF_IEEE80211_FC0_VERSION_0 | QDF_IEEE80211_FC0_TYPE_DATA |
586 QDF_IEEE80211_FC0_SUBTYPE_QOS_NULL);
587 TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
588 TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
589 wh_addr3->i_fc[1] = 0;
590 wh_addr3->i_fc[0] = frm_ctl;
591
592 duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info, duration));
593 wh_addr3->i_dur[1] = (duration_le & 0xFF00) >> 8;
594 wh_addr3->i_dur[0] = (duration_le & 0xFF);
595
596 qdf_mem_copy(wh_addr3->i_addr1,
597 TXMON_STATUS_INFO(tx_status_info, addr1),
598 QDF_MAC_ADDR_SIZE);
599 qdf_mem_copy(wh_addr3->i_addr2,
600 TXMON_STATUS_INFO(tx_status_info, addr2),
601 QDF_MAC_ADDR_SIZE);
602 qdf_mem_copy(wh_addr3->i_addr3,
603 TXMON_STATUS_INFO(tx_status_info, addr3),
604 QDF_MAC_ADDR_SIZE);
605
606 qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_addr3));
607 dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, num_users, mpdu_nbuf);
608 TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
609 }
610
611 /**
612 * dp_tx_mon_generate_4addr_qos_null_frm() - API to generate
613 * 4 address qos null frame
614 *
615 * @pdev: pdev Handle
616 * @tx_ppdu_info: pointer to tx ppdu info structure
617 *
618 * Return: void
619 */
620 static void
621 dp_tx_mon_generate_4addr_qos_null_frm(struct dp_pdev *pdev,
622 struct dp_tx_ppdu_info *tx_ppdu_info)
623 {
624 /* allocate and populate 4 address qos null frame */
625 /* enqueue 802.11 payload to per user mpdu_q */
626 struct dp_mon_pdev *mon_pdev;
627 struct dp_mon_pdev_be *mon_pdev_be;
628 struct dp_pdev_tx_monitor_be *tx_mon_be;
629 struct hal_tx_status_info *tx_status_info;
630 struct ieee80211_qosframe_addr4 *wh_addr4 = NULL;
631 qdf_nbuf_t mpdu_nbuf = NULL;
632 uint16_t duration_le = 0;
633 uint8_t num_users = 0;
634 uint8_t frm_ctl;
635
636 /* sanity check */
637 if (qdf_unlikely(!pdev))
638 return;
639
640 mon_pdev = pdev->monitor_pdev;
641 if (qdf_unlikely(!mon_pdev))
642 return;
643
644 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
645 if (qdf_unlikely(!mon_pdev_be))
646 return;
647
648 tx_mon_be = &mon_pdev_be->tx_monitor_be;
649 tx_status_info = &tx_mon_be->data_status_info;
650 /*
651 * for radiotap we allocate new skb,
652 	 * so we don't need to reserve skb headroom
653 */
654 mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
655 MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
656 if (!mpdu_nbuf)
657 return;
658
659 wh_addr4 = (struct ieee80211_qosframe_addr4 *)qdf_nbuf_data(mpdu_nbuf);
660 qdf_mem_zero(wh_addr4, sizeof(struct ieee80211_qosframe_addr4));
661
662 frm_ctl = (QDF_IEEE80211_FC0_VERSION_0 | QDF_IEEE80211_FC0_TYPE_DATA |
663 QDF_IEEE80211_FC0_SUBTYPE_QOS_NULL);
664 TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
665 TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
666 wh_addr4->i_fc[1] = 0;
667 wh_addr4->i_fc[0] = frm_ctl;
668
669 duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info, duration));
670 wh_addr4->i_dur[1] = (duration_le & 0xFF00) >> 8;
671 wh_addr4->i_dur[0] = (duration_le & 0xFF);
672
673 qdf_mem_copy(wh_addr4->i_addr1,
674 TXMON_STATUS_INFO(tx_status_info, addr1),
675 QDF_MAC_ADDR_SIZE);
676 qdf_mem_copy(wh_addr4->i_addr2,
677 TXMON_STATUS_INFO(tx_status_info, addr2),
678 QDF_MAC_ADDR_SIZE);
679 qdf_mem_copy(wh_addr4->i_addr3,
680 TXMON_STATUS_INFO(tx_status_info, addr3),
681 QDF_MAC_ADDR_SIZE);
682 qdf_mem_copy(wh_addr4->i_addr4,
683 TXMON_STATUS_INFO(tx_status_info, addr4),
684 QDF_MAC_ADDR_SIZE);
685
686 qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_addr4));
687 dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, num_users, mpdu_nbuf);
688 TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
689 }
690
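/*
 * Sizes used when allocating generated block ack frames: the control frame
 * header, the 2 byte BA control field and the (per user) BA information,
 * whose length scales with the BA bitmap size reported for that user.
 */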
691 #define TXMON_BA_CTRL_SZ 2
692 #define TXMON_BA_INFO_SZ(bitmap_sz) ((4 * (bitmap_sz)) + 6)
693 #define TXMON_MU_BA_ACK_FRAME_SZ(bitmap_sz) \
694 (sizeof(struct ieee80211_ctlframe_addr2) +\
695 TXMON_BA_CTRL_SZ + (bitmap_sz))
696
697 #define TXMON_BA_ACK_FRAME_SZ(bitmap_sz) \
698 (sizeof(struct ieee80211_ctlframe_addr2) +\
699 TXMON_BA_CTRL_SZ + TXMON_BA_INFO_SZ(bitmap_sz))
700
701 /**
702 * dp_tx_mon_generate_mu_block_ack_frm() - API to generate MU block ack frame
703 * @pdev: pdev Handle
704 * @tx_ppdu_info: pointer to tx ppdu info structure
705 * @window_flag: frame generated window
706 *
707 * Return: void
708 */
709 static void
710 dp_tx_mon_generate_mu_block_ack_frm(struct dp_pdev *pdev,
711 struct dp_tx_ppdu_info *tx_ppdu_info,
712 uint8_t window_flag)
713 {
714 /* allocate and populate MU block ack frame */
715 /* enqueue 802.11 payload to per user mpdu_q */
716 struct dp_mon_pdev *mon_pdev;
717 struct dp_mon_pdev_be *mon_pdev_be;
718 struct dp_pdev_tx_monitor_be *tx_mon_be;
719 struct hal_tx_status_info *tx_status_info;
720 struct ieee80211_ctlframe_addr2 *wh_addr2 = NULL;
721 qdf_nbuf_t mpdu_nbuf = NULL;
722 uint16_t ba_control = 0;
723 uint8_t *frm = NULL;
724 uint32_t ba_sz = 0;
725 uint8_t num_users = TXMON_PPDU_HAL(tx_ppdu_info, num_users);
726 uint8_t i = 0;
727 uint8_t frm_ctl;
728
729 /* sanity check */
730 if (qdf_unlikely(!pdev))
731 return;
732
733 mon_pdev = pdev->monitor_pdev;
734 if (qdf_unlikely(!mon_pdev))
735 return;
736
737 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
738 if (qdf_unlikely(!mon_pdev_be))
739 return;
740
741 tx_mon_be = &mon_pdev_be->tx_monitor_be;
742 tx_status_info = &tx_mon_be->data_status_info;
743 for (i = 0; i < num_users; i++)
744 ba_sz += (4 << TXMON_BA_INFO_SZ(TXMON_PPDU_USR(tx_ppdu_info,
745 i,
746 ba_bitmap_sz)));
747
748 /*
749 * for multi sta block ack, do we need to increase the size
750 * or copy info on subsequent frame offset
751 *
752 * for radiotap we allocate new skb,
753 	 * so we don't need to reserve skb headroom
754 */
755 mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
756 TXMON_MU_BA_ACK_FRAME_SZ(ba_sz), 0, 4,
757 FALSE);
758 if (!mpdu_nbuf) {
759 /* TODO: update status and break */
760 return;
761 }
762
763 wh_addr2 = (struct ieee80211_ctlframe_addr2 *)qdf_nbuf_data(mpdu_nbuf);
764 qdf_mem_zero(wh_addr2, DP_BA_ACK_FRAME_SIZE);
765
766 frm_ctl = (QDF_IEEE80211_FC0_VERSION_0 | QDF_IEEE80211_FC0_TYPE_CTL |
767 QDF_IEEE80211_FC0_SUBTYPE_BA);
768 TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
769 TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
770 wh_addr2->i_fc[1] = 0;
771 wh_addr2->i_fc[0] = frm_ctl;
772
773 *(u_int16_t *)(&wh_addr2->i_aidordur) = qdf_cpu_to_le16(0x0000);
774
775 if (window_flag == RESPONSE_WINDOW) {
776 qdf_mem_copy(wh_addr2->i_addr2,
777 TXMON_STATUS_INFO(tx_status_info, addr2),
778 QDF_MAC_ADDR_SIZE);
779 if (num_users > 1)
780 qdf_mem_set(wh_addr2->i_addr1, QDF_MAC_ADDR_SIZE, 0xFF);
781 else
782 qdf_mem_copy(wh_addr2->i_addr1,
783 TXMON_STATUS_INFO(tx_status_info, addr1),
784 QDF_MAC_ADDR_SIZE);
785 } else {
786 qdf_mem_copy(wh_addr2->i_addr2,
787 TXMON_STATUS_INFO(tx_status_info, addr1),
788 QDF_MAC_ADDR_SIZE);
789 qdf_mem_copy(wh_addr2->i_addr1,
790 TXMON_STATUS_INFO(tx_status_info, addr2),
791 QDF_MAC_ADDR_SIZE);
792 }
793
794 frm = (uint8_t *)&wh_addr2[1];
795
796 	/* BA control: ack policy 0 (normal ack), BA type 11 (Multi-STA) */
797 	ba_control = 0x0016;
798 *((uint16_t *)frm) = qdf_cpu_to_le16(ba_control);
799 frm += 2;
800
801 for (i = 0; i < num_users; i++) {
802 *((uint16_t *)frm) =
803 qdf_cpu_to_le16((TXMON_PPDU_USR(tx_ppdu_info, i, tid) <<
804 DP_IEEE80211_BAR_CTL_TID_S) |
805 (TXMON_PPDU_USR(tx_ppdu_info, i,
806 aid) & 0x7FF));
807 frm += 2;
808 *((uint16_t *)frm) = qdf_cpu_to_le16(
809 TXMON_PPDU_USR(tx_ppdu_info, i, start_seq));
810 frm += 2;
811 qdf_mem_copy(frm,
812 TXMON_PPDU_USR(tx_ppdu_info, i, ba_bitmap),
813 4 <<
814 TXMON_PPDU_USR(tx_ppdu_info,
815 i, ba_bitmap_sz));
816 frm += 4 << TXMON_PPDU_USR(tx_ppdu_info, i, ba_bitmap_sz);
817 }
818
819 qdf_nbuf_set_pktlen(mpdu_nbuf,
820 (frm - (uint8_t *)qdf_nbuf_data(mpdu_nbuf)));
821
822 /* always enqueue to first active user */
823 dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);
824 TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
825 /* HE MU fields not required for Multi Sta Block ack frame */
826 TXMON_PPDU_COM(tx_ppdu_info, he_mu_flags) = 0;
827 }
828
829 /**
830 * dp_tx_mon_generate_block_ack_frm() - API to generate block ack frame
831 * @pdev: pdev Handle
832 * @tx_ppdu_info: pointer to tx ppdu info structure
833 * @window_flag: frame generated window
834 *
835 * Return: void
836 */
837 static void
838 dp_tx_mon_generate_block_ack_frm(struct dp_pdev *pdev,
839 struct dp_tx_ppdu_info *tx_ppdu_info,
840 uint8_t window_flag)
841 {
842 /* allocate and populate block ack frame */
843 /* enqueue 802.11 payload to per user mpdu_q */
844 struct dp_mon_pdev *mon_pdev;
845 struct dp_mon_pdev_be *mon_pdev_be;
846 struct dp_pdev_tx_monitor_be *tx_mon_be;
847 struct hal_tx_status_info *tx_status_info;
848 struct ieee80211_ctlframe_addr2 *wh_addr2 = NULL;
849 qdf_nbuf_t mpdu_nbuf = NULL;
850 uint8_t *frm = NULL;
851 uint8_t user_id = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
852 uint32_t ba_bitmap_sz = TXMON_PPDU_USR(tx_ppdu_info,
853 user_id, ba_bitmap_sz);
854 uint8_t frm_ctl;
855
856 /* sanity check */
857 if (qdf_unlikely(!pdev))
858 return;
859
860 mon_pdev = pdev->monitor_pdev;
861 if (qdf_unlikely(!mon_pdev))
862 return;
863
864 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
865 if (qdf_unlikely(!mon_pdev_be))
866 return;
867
868 tx_mon_be = &mon_pdev_be->tx_monitor_be;
869 tx_status_info = &tx_mon_be->data_status_info;
870 /*
871 * for multi sta block ack, do we need to increase the size
872 * or copy info on subsequent frame offset
873 *
874 * for radiotap we allocate new skb,
875 	 * so we don't need to reserve skb headroom
876 */
877 mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
878 TXMON_BA_ACK_FRAME_SZ(ba_bitmap_sz),
879 0, 4, FALSE);
880 if (!mpdu_nbuf) {
881 /* TODO: update status and break */
882 return;
883 }
884
885 /*
886 * BA CONTROL
887 * fields required to construct block ack information
888 * B0 - BA ACK POLICY
889 * 0 - Normal ACK
890 * 1 - No ACK
891 * B1 - MULTI TID
892 * B2 - COMPRESSED BITMAP
893 	 * B1 B2
894 * 00 - Basic block ack
895 * 01 - Compressed block ack
896 * 10 - Reserved
897 * 11 - Multi tid block ack
898 * B3-B11 - Reserved
899 * B12-B15 - TID info
900 *
901 * BA INFORMATION
902 * Per sta tid info
903 * AID: 11 bits
904 * ACK type: 1 bit
905 * TID: 4 bits
906 *
907 * BA SEQ CTRL
908 *
909 * BA bitmap
910 *
911 */
912
913 wh_addr2 = (struct ieee80211_ctlframe_addr2 *)qdf_nbuf_data(mpdu_nbuf);
914 qdf_mem_zero(wh_addr2, DP_BA_ACK_FRAME_SIZE);
915
916 frm_ctl = (QDF_IEEE80211_FC0_VERSION_0 | QDF_IEEE80211_FC0_TYPE_CTL |
917 QDF_IEEE80211_FC0_SUBTYPE_BA);
918 TXMON_PPDU_COM(tx_ppdu_info, frame_control) = frm_ctl;
919 TXMON_PPDU_COM(tx_ppdu_info, frame_control_info_valid) = 1;
920 wh_addr2->i_fc[1] = 0;
921 wh_addr2->i_fc[0] = frm_ctl;
922
923 /* duration */
924 *(u_int16_t *)(&wh_addr2->i_aidordur) = qdf_cpu_to_le16(0x0020);
925
926 if (window_flag) {
927 qdf_mem_copy(wh_addr2->i_addr2,
928 TXMON_STATUS_INFO(tx_status_info, addr2),
929 QDF_MAC_ADDR_SIZE);
930 qdf_mem_copy(wh_addr2->i_addr1,
931 TXMON_STATUS_INFO(tx_status_info, addr1),
932 QDF_MAC_ADDR_SIZE);
933 } else {
934 qdf_mem_copy(wh_addr2->i_addr2,
935 TXMON_STATUS_INFO(tx_status_info, addr1),
936 QDF_MAC_ADDR_SIZE);
937 qdf_mem_copy(wh_addr2->i_addr1,
938 TXMON_STATUS_INFO(tx_status_info, addr2),
939 QDF_MAC_ADDR_SIZE);
940 }
941
942 frm = (uint8_t *)&wh_addr2[1];
943 /* BA control */
944 *((uint16_t *)frm) = qdf_cpu_to_le16(TXMON_PPDU_USR(tx_ppdu_info,
945 user_id,
946 ba_control));
947 frm += 2;
948 *((uint16_t *)frm) = qdf_cpu_to_le16(TXMON_PPDU_USR(tx_ppdu_info,
949 user_id,
950 start_seq));
951 frm += 2;
952 qdf_mem_copy(frm,
953 TXMON_PPDU_USR(tx_ppdu_info, user_id, ba_bitmap),
954 4 << TXMON_PPDU_USR(tx_ppdu_info, user_id, ba_bitmap_sz));
955 frm += (4 << TXMON_PPDU_USR(tx_ppdu_info, user_id, ba_bitmap_sz));
956
957 qdf_nbuf_set_pktlen(mpdu_nbuf,
958 (frm - (uint8_t *)qdf_nbuf_data(mpdu_nbuf)));
959
960 dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);
961
962 TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
963 }
964
965 /**
966  * dp_tx_mon_alloc_mpdu() - API to allocate an mpdu nbuf and add it to the
967  * current user's mpdu queue
968 *
969 * @pdev: pdev Handle
970 * @tx_ppdu_info: pointer to tx ppdu info structure
971 *
972 * Return: void
973 */
974 static void
975 dp_tx_mon_alloc_mpdu(struct dp_pdev *pdev, struct dp_tx_ppdu_info *tx_ppdu_info)
976 {
977 qdf_nbuf_t mpdu_nbuf = NULL;
978 qdf_nbuf_queue_t *usr_mpdu_q = NULL;
979 uint32_t usr_idx = 0;
980
981 /*
982 * payload will be added as a frag to buffer
983 * and we allocate new skb for radiotap header
984 * we allocate a dummy buffer size
985 */
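	/*
	 * payload fragments are attached to this nbuf later, when
	 * HAL_MON_TX_DATA / HAL_MON_TX_BUFFER_ADDR TLVs are parsed and
	 * dp_tx_mon_generate_data_frm() runs for the same user.
	 */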
986 mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
987 MAX_MONITOR_HEADER, MAX_MONITOR_HEADER,
988 4, FALSE);
989 if (!mpdu_nbuf) {
990 qdf_err("%s: %d No memory to allocate mpdu_nbuf!!!!!\n",
991 __func__, __LINE__);
992 return;
993 }
994
995 usr_idx = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
996 usr_mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, usr_idx, mpdu_q);
997
998 qdf_nbuf_queue_add(usr_mpdu_q, mpdu_nbuf);
999 }
1000
1001 /**
1002 * dp_tx_mon_generate_data_frm() - API to generate data frame
1003 * @pdev: pdev Handle
1004 * @tx_ppdu_info: pointer to tx ppdu info structure
1005  * @take_ref: take a reference on the payload fragment being attached
1006 *
1007 * Return: void
1008 */
1009 static void
1010 dp_tx_mon_generate_data_frm(struct dp_pdev *pdev,
1011 struct dp_tx_ppdu_info *tx_ppdu_info,
1012 bool take_ref)
1013 {
1014 struct dp_mon_pdev *mon_pdev;
1015 struct dp_mon_pdev_be *mon_pdev_be;
1016 struct dp_pdev_tx_monitor_be *tx_mon_be;
1017 struct hal_tx_status_info *tx_status_info;
1018 qdf_nbuf_t mpdu_nbuf = NULL;
1019 qdf_nbuf_queue_t *usr_mpdu_q = NULL;
1020 uint32_t usr_idx = 0;
1021
1022 /* sanity check */
1023 if (qdf_unlikely(!pdev))
1024 return;
1025
1026 mon_pdev = pdev->monitor_pdev;
1027 if (qdf_unlikely(!mon_pdev))
1028 return;
1029
1030 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1031 if (qdf_unlikely(!mon_pdev_be))
1032 return;
1033
1034 tx_mon_be = &mon_pdev_be->tx_monitor_be;
1035
1036 tx_status_info = &tx_mon_be->data_status_info;
1037 usr_idx = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
1038 usr_mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, usr_idx, mpdu_q);
1039 mpdu_nbuf = qdf_nbuf_queue_last(usr_mpdu_q);
1040
1041 if (!mpdu_nbuf)
1042 QDF_BUG(0);
1043
1044 tx_mon_be->stats.pkt_buf_processed++;
1045
1046 /* add function to either copy or add frag to frag_list */
1047 qdf_nbuf_add_frag(pdev->soc->osdev,
1048 TXMON_STATUS_INFO(tx_status_info, buffer),
1049 mpdu_nbuf,
1050 TXMON_STATUS_INFO(tx_status_info, offset),
1051 TXMON_STATUS_INFO(tx_status_info, length),
1052 DP_MON_DATA_BUFFER_SIZE,
1053 take_ref, TXMON_NO_BUFFER_SZ);
1054 }
1055
1056 /**
1057 * dp_tx_mon_generate_prot_frm() - API to generate protection frame
1058 * @pdev: pdev Handle
1059 * @tx_ppdu_info: pointer to tx ppdu info structure
1060 *
1061 * Return: void
1062 */
1063 static void
1064 dp_tx_mon_generate_prot_frm(struct dp_pdev *pdev,
1065 struct dp_tx_ppdu_info *tx_ppdu_info)
1066 {
1067 struct dp_mon_pdev *mon_pdev;
1068 struct dp_mon_pdev_be *mon_pdev_be;
1069 struct dp_pdev_tx_monitor_be *tx_mon_be;
1070 struct hal_tx_status_info *tx_status_info;
1071
1072 /* sanity check */
1073 if (qdf_unlikely(!pdev))
1074 return;
1075
1076 mon_pdev = pdev->monitor_pdev;
1077 if (qdf_unlikely(!mon_pdev))
1078 return;
1079
1080 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1081 if (qdf_unlikely(!mon_pdev_be))
1082 return;
1083
1084 tx_mon_be = &mon_pdev_be->tx_monitor_be;
1085 tx_status_info = &tx_mon_be->prot_status_info;
1086
1087 /* update medium prot type from data */
1088 TXMON_STATUS_INFO(tx_status_info, medium_prot_type) =
1089 tx_mon_be->data_status_info.medium_prot_type;
1090
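	/* generate the frame matching the medium protection type used for this PPDU */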
1091 switch (TXMON_STATUS_INFO(tx_status_info, medium_prot_type)) {
1092 case TXMON_MEDIUM_NO_PROTECTION:
1093 {
1094 /* no protection frame - do nothing */
1095 break;
1096 }
1097 case TXMON_MEDIUM_RTS_LEGACY:
1098 case TXMON_MEDIUM_RTS_11AC_STATIC_BW:
1099 case TXMON_MEDIUM_RTS_11AC_DYNAMIC_BW:
1100 {
1101 dp_tx_mon_generate_rts_frm(pdev, tx_ppdu_info,
1102 INITIATOR_WINDOW);
1103 break;
1104 }
1105 case TXMON_MEDIUM_CTS2SELF:
1106 {
1107 dp_tx_mon_generate_cts2self_frm(pdev, tx_ppdu_info,
1108 INITIATOR_WINDOW);
1109 break;
1110 }
1111 case TXMON_MEDIUM_QOS_NULL_NO_ACK_3ADDR:
1112 {
1113 dp_tx_mon_generate_3addr_qos_null_frm(pdev, tx_ppdu_info);
1114 break;
1115 }
1116 case TXMON_MEDIUM_QOS_NULL_NO_ACK_4ADDR:
1117 {
1118 dp_tx_mon_generate_4addr_qos_null_frm(pdev, tx_ppdu_info);
1119 break;
1120 }
1121 }
1122 }
1123
1124 /**
1125 * dp_tx_mon_generated_response_frm() - API to handle generated response frame
1126 * @pdev: pdev Handle
1127 * @tx_ppdu_info: pointer to tx ppdu info structure
1128 *
1129 * Return: QDF_STATUS
1130 */
1131 static QDF_STATUS
1132 dp_tx_mon_generated_response_frm(struct dp_pdev *pdev,
1133 struct dp_tx_ppdu_info *tx_ppdu_info)
1134 {
1135 struct dp_mon_pdev *mon_pdev;
1136 struct dp_mon_pdev_be *mon_pdev_be;
1137 struct dp_pdev_tx_monitor_be *tx_mon_be;
1138 struct hal_tx_status_info *tx_status_info;
1139 QDF_STATUS status = QDF_STATUS_SUCCESS;
1140 uint8_t gen_response = 0;
1141
1142 /* sanity check */
1143 if (qdf_unlikely(!pdev))
1144 return QDF_STATUS_E_NOMEM;
1145
1146 mon_pdev = pdev->monitor_pdev;
1147 if (qdf_unlikely(!mon_pdev))
1148 return QDF_STATUS_E_NOMEM;
1149
1150 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1151 if (qdf_unlikely(!mon_pdev_be))
1152 return QDF_STATUS_E_NOMEM;
1153
1154 tx_mon_be = &mon_pdev_be->tx_monitor_be;
1155
1156 tx_status_info = &tx_mon_be->data_status_info;
1157 gen_response = TXMON_STATUS_INFO(tx_status_info, generated_response);
1158
1159 switch (gen_response) {
1160 case TXMON_GEN_RESP_SELFGEN_ACK:
1161 {
1162 dp_tx_mon_generate_ack_frm(pdev, tx_ppdu_info);
1163 break;
1164 }
1165 case TXMON_GEN_RESP_SELFGEN_CTS:
1166 {
1167 dp_tx_mon_generate_cts2self_frm(pdev, tx_ppdu_info,
1168 RESPONSE_WINDOW);
1169 break;
1170 }
1171 case TXMON_GEN_RESP_SELFGEN_BA:
1172 {
1173 dp_tx_mon_generate_block_ack_frm(pdev, tx_ppdu_info,
1174 RESPONSE_WINDOW);
1175 break;
1176 }
1177 case TXMON_GEN_RESP_SELFGEN_MBA:
1178 {
1179 dp_tx_mon_generate_mu_block_ack_frm(pdev, tx_ppdu_info,
1180 RESPONSE_WINDOW);
1181 break;
1182 }
1183 case TXMON_GEN_RESP_SELFGEN_CBF:
1184 {
1185 break;
1186 }
1187 case TXMON_GEN_RESP_SELFGEN_TRIG:
1188 {
1189 break;
1190 }
1191 case TXMON_GEN_RESP_SELFGEN_NDP_LMR:
1192 {
1193 break;
1194 }
1195 };
1196
1197 return status;
1198 }
1199
1200 /**
1201  * dp_tx_mon_update_ppdu_info_status() - API to update the tx ppdu info from
1202  * the TLV status returned by the HAL status parser
1203 *
1204 * @pdev: pdev Handle
1205 * @tx_data_ppdu_info: pointer to data tx ppdu info
1206 * @tx_prot_ppdu_info: pointer to protection tx ppdu info
1207 * @tx_tlv_hdr: pointer to tx_tlv_hdr
1208 * @status_frag: pointer to fragment
1209 * @tlv_status: tlv status return from hal api
1210 * @mon_desc_list_ref: tx monitor descriptor list reference
1211 *
1212 * Return: QDF_STATUS
1213 */
1214 static QDF_STATUS
1215 dp_tx_mon_update_ppdu_info_status(struct dp_pdev *pdev,
1216 struct dp_tx_ppdu_info *tx_data_ppdu_info,
1217 struct dp_tx_ppdu_info *tx_prot_ppdu_info,
1218 void *tx_tlv_hdr,
1219 qdf_frag_t status_frag,
1220 uint32_t tlv_status,
1221 struct dp_tx_mon_desc_list *mon_desc_list_ref)
1222 {
1223 struct dp_mon_pdev *mon_pdev;
1224 struct dp_mon_pdev_be *mon_pdev_be;
1225 struct dp_pdev_tx_monitor_be *tx_mon_be;
1226 struct hal_tx_status_info *tx_status_info;
1227 QDF_STATUS status = QDF_STATUS_SUCCESS;
1228
1229 /* sanity check */
1230 if (qdf_unlikely(!pdev))
1231 return QDF_STATUS_E_NOMEM;
1232
1233 mon_pdev = pdev->monitor_pdev;
1234 if (qdf_unlikely(!mon_pdev))
1235 return QDF_STATUS_E_NOMEM;
1236
1237 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1238 if (qdf_unlikely(!mon_pdev_be))
1239 return QDF_STATUS_E_NOMEM;
1240
1241 tx_mon_be = &mon_pdev_be->tx_monitor_be;
1242
1243 switch (tlv_status) {
1244 case HAL_MON_TX_FES_SETUP:
1245 {
1246 /*
1247 * start of initiator window
1248 *
1249 * got number of user count from fes setup tlv
1250 */
1251 break;
1252 }
1253 case HAL_MON_RX_RESPONSE_REQUIRED_INFO:
1254 {
1255 break;
1256 }
1257 case HAL_MON_TX_FES_STATUS_START_PROT:
1258 {
1259 /* update tsft to local */
1260 break;
1261 }
1262 case HAL_MON_TX_FES_STATUS_START_PPDU:
1263 {
1264 /* update tsft to local */
1265 break;
1266 }
1267 case HAL_MON_TX_FES_STATUS_PROT:
1268 {
1269 TXMON_PPDU_HAL(tx_prot_ppdu_info, is_used) = 1;
1270 TXMON_PPDU_COM(tx_prot_ppdu_info, ppdu_timestamp) =
1271 TXMON_PPDU_COM(tx_prot_ppdu_info, ppdu_timestamp) << 1;
1272
1273 /* based on medium protection type we need to generate frame */
1274 dp_tx_mon_generate_prot_frm(pdev, tx_prot_ppdu_info);
1275 break;
1276 }
1277 case HAL_MON_RX_FRAME_BITMAP_ACK:
1278 {
1279 break;
1280 }
1281 case HAL_MON_RX_FRAME_BITMAP_BLOCK_ACK_256:
1282 case HAL_MON_RX_FRAME_BITMAP_BLOCK_ACK_1K:
1283 {
1284 /*
1285 * this comes for each user
1286 * BlockAck is not same as ACK, single frame can hold
1287 * multiple BlockAck info
1288 */
1289 tx_status_info = &tx_mon_be->data_status_info;
1290
1291 if (TXMON_PPDU_HAL(tx_data_ppdu_info, num_users))
1292 dp_tx_mon_generate_block_ack_frm(pdev,
1293 tx_data_ppdu_info,
1294 INITIATOR_WINDOW);
1295 else
1296 dp_tx_mon_generate_mu_block_ack_frm(pdev,
1297 tx_data_ppdu_info,
1298 INITIATOR_WINDOW);
1299
1300 break;
1301 }
1302 case HAL_MON_TX_MPDU_START:
1303 {
1304 dp_tx_mon_alloc_mpdu(pdev, tx_data_ppdu_info);
1305 TXMON_PPDU_HAL(tx_data_ppdu_info, is_used) = 1;
1306 break;
1307 }
1308 case HAL_MON_TX_MSDU_START:
1309 {
1310 break;
1311 }
1312 case HAL_MON_TX_DATA:
1313 {
1314 TXMON_PPDU_HAL(tx_data_ppdu_info, is_used) = 1;
1315 dp_tx_mon_generate_data_frm(pdev, tx_data_ppdu_info, true);
1316 break;
1317 }
1318 case HAL_MON_TX_BUFFER_ADDR:
1319 {
1320 struct hal_mon_packet_info *packet_info = NULL;
1321 struct dp_mon_desc *mon_desc = NULL;
1322 qdf_frag_t packet_buffer = NULL;
1323 uint32_t end_offset = 0;
1324
1325 tx_status_info = &tx_mon_be->data_status_info;
1326 /* update buffer from packet info */
1327 packet_info = &TXMON_PPDU_HAL(tx_data_ppdu_info, packet_info);
1328 mon_desc = (struct dp_mon_desc *)(uintptr_t)packet_info->sw_cookie;
1329
1330 qdf_assert_always(mon_desc);
1331
1332 if (mon_desc->magic != DP_MON_DESC_MAGIC)
1333 qdf_assert_always(0);
1334
1335 qdf_assert_always(mon_desc->buf_addr);
1336 tx_mon_be->stats.pkt_buf_recv++;
1337
1338 if (!mon_desc->unmapped) {
1339 qdf_mem_unmap_page(pdev->soc->osdev,
1340 (qdf_dma_addr_t)mon_desc->paddr,
1341 DP_MON_DATA_BUFFER_SIZE,
1342 QDF_DMA_FROM_DEVICE);
1343 mon_desc->unmapped = 1;
1344 }
1345
1346 packet_buffer = mon_desc->buf_addr;
1347 mon_desc->buf_addr = NULL;
1348
1349 /* increment reap count */
1350 mon_desc_list_ref->tx_mon_reap_cnt++;
1351
1352 /* add the mon_desc to free list */
1353 dp_mon_add_to_free_desc_list(&mon_desc_list_ref->desc_list,
1354 &mon_desc_list_ref->tail,
1355 mon_desc);
1356
1357 TXMON_STATUS_INFO(tx_status_info, buffer) = packet_buffer;
1358 TXMON_STATUS_INFO(tx_status_info, offset) = end_offset;
1359 TXMON_STATUS_INFO(tx_status_info,
1360 length) = packet_info->dma_length;
1361
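		/*
		 * attach the reaped payload to the current mpdu; take_ref is
		 * false since ownership of the frag moved from the mon desc
		 */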
1362 TXMON_PPDU_HAL(tx_data_ppdu_info, is_used) = 1;
1363 dp_tx_mon_generate_data_frm(pdev, tx_data_ppdu_info, false);
1364 break;
1365 }
1366 case HAL_MON_TX_FES_STATUS_END:
1367 {
1368 break;
1369 }
1370 case HAL_MON_RESPONSE_END_STATUS_INFO:
1371 {
1372 dp_tx_mon_generated_response_frm(pdev, tx_data_ppdu_info);
1373 break;
1374 }
1375 case HAL_MON_TX_FES_STATUS_START:
1376 {
1377 /* update the medium protection type */
1378 break;
1379 }
1380 case HAL_MON_TX_QUEUE_EXTENSION:
1381 {
1382 /* No action for Queue Extension TLV */
1383 break;
1384 }
1385 case HAL_MON_TX_FW2SW:
1386 {
1387 /* update the frequency */
1388 tx_status_info = &tx_mon_be->data_status_info;
1389
1390 TXMON_PPDU_COM(tx_data_ppdu_info,
1391 chan_freq) = TXMON_STATUS_INFO(tx_status_info,
1392 freq);
1393 TXMON_PPDU_COM(tx_prot_ppdu_info,
1394 chan_freq) = TXMON_STATUS_INFO(tx_status_info,
1395 freq);
1396 break;
1397 }
1398 default:
1399 {
1400 /* return or break in default case */
1401 break;
1402 }
1403 };
1404
1405 return status;
1406 }
1407
1408 #ifdef MONITOR_TLV_RECORDING_ENABLE
1409 /**
1410 * dp_tx_mon_record_index_update() - update the indexes of dp_mon_tlv_logger
1411 * to store next Tx TLV
1412 *
1413 * @mon_pdev_be: pointer to dp_mon_pdev_be
1414 *
1415 * Return: void
1416 */
1417 void dp_tx_mon_record_index_update(struct dp_mon_pdev_be *mon_pdev_be)
1418 {
1419 struct dp_mon_tlv_logger *tlv_log = NULL;
1420 struct dp_tx_mon_tlv_info *tlv_info = NULL;
1421
1422 tlv_log = mon_pdev_be->tx_tlv_log;
1423 tlv_info = (struct dp_tx_mon_tlv_info *)tlv_log->buff;
1424
1425 (tlv_log->curr_ppdu_pos + 1 == MAX_NUM_PPDU_RECORD) ?
1426 tlv_log->curr_ppdu_pos = 0 :
1427 tlv_log->curr_ppdu_pos++;
1428
1429 tlv_log->wrap_flag = 0;
1430 tlv_log->ppdu_start_idx = tlv_log->curr_ppdu_pos *
1431 MAX_TLVS_PER_PPDU;
1432 tlv_log->mpdu_idx = tlv_log->ppdu_start_idx +
1433 MAX_PPDU_START_TLV_NUM;
1434 tlv_log->ppdu_end_idx = tlv_log->mpdu_idx + MAX_MPDU_TLV_NUM;
1435 tlv_log->max_ppdu_start_idx = tlv_log->ppdu_start_idx +
1436 MAX_PPDU_START_TLV_NUM - 1;
1437 tlv_log->max_mpdu_idx = tlv_log->mpdu_idx +
1438 MAX_MPDU_TLV_NUM - 1;
1439 tlv_log->max_ppdu_end_idx = tlv_log->ppdu_end_idx +
1440 MAX_PPDU_END_TLV_NUM - 1;
1441 }
1442
1443 /**
1444 * dp_tx_mon_record_tlv() - Store the contents of the tlv in buffer
1445 *
1446 * @mon_pdev_be: pointer to dp_mon_pdev_be
1447 * @data_ppdu_info: pointer to HAL Tx data ppdu info
1448 * @proto_ppdu_info: pointer to HAL Tx proto ppdu info
1449 *
1450 * Return: void
1451 */
1452 void dp_tx_mon_record_tlv(struct dp_mon_pdev_be *mon_pdev_be,
1453 struct hal_tx_ppdu_info *data_ppdu_info,
1454 struct hal_tx_ppdu_info *proto_ppdu_info)
1455 {
1456 struct hal_tx_ppdu_info *ppdu_info = NULL;
1457 struct dp_tx_mon_tlv_info *tlv_info = NULL;
1458 struct dp_mon_tlv_logger *tlv_log = NULL;
1459 uint16_t *ppdu_start_idx = NULL;
1460 uint16_t *mpdu_idx = NULL;
1461 uint16_t *ppdu_end_idx = NULL;
1462 uint32_t tlv_tag;
1463
1464 if (!mon_pdev_be || !(mon_pdev_be->tx_tlv_log))
1465 return;
1466
1467 tlv_log = mon_pdev_be->tx_tlv_log;
1468 if (!tlv_log->tlv_logging_enable || !(tlv_log->buff))
1469 return;
1470
1471 tlv_info = (struct dp_tx_mon_tlv_info *)tlv_log->buff;
1472 ppdu_start_idx = &tlv_log->ppdu_start_idx;
1473 mpdu_idx = &tlv_log->mpdu_idx;
1474 ppdu_end_idx = &tlv_log->ppdu_end_idx;
1475
1476 ppdu_info = (data_ppdu_info->tx_tlv_info.is_data_ppdu_info) ?
1477 data_ppdu_info : proto_ppdu_info;
1478 tlv_tag = ppdu_info->tx_tlv_info.tlv_tag;
1479
1480 if (ppdu_info->tx_tlv_info.tlv_category == CATEGORY_PPDU_START) {
1481 tlv_info[*ppdu_start_idx].tlv_tag = tlv_tag;
1482 switch (tlv_tag) {
1483 case WIFITX_FES_SETUP_E:
1484 case WIFITXPCU_BUFFER_STATUS_E:
1485 case WIFIPCU_PPDU_SETUP_INIT_E:
1486 case WIFISCH_CRITICAL_TLV_REFERENCE_E:
1487 case WIFITX_PEER_ENTRY_E:
1488 case WIFITX_RAW_OR_NATIVE_FRAME_SETUP_E:
1489 case WIFITX_QUEUE_EXTENSION_E:
1490 case WIFITX_FES_SETUP_COMPLETE_E:
1491 case WIFIFW2SW_MON_E:
1492 case WIFISCHEDULER_END_E:
1493 case WIFITQM_MPDU_GLOBAL_START_E:
1494 ;
1495 }
1496 if (*ppdu_start_idx < tlv_log->max_ppdu_start_idx)
1497 (*ppdu_start_idx)++;
1498 } else if (ppdu_info->tx_tlv_info.tlv_category == CATEGORY_MPDU) {
1499 tlv_info[*mpdu_idx].tlv_tag = tlv_tag;
1500 switch (tlv_tag) {
1501 case WIFITX_MPDU_START_E:
1502 case WIFITX_MSDU_START_E:
1503 case WIFITX_DATA_E:
1504 case WIFITX_MSDU_END_E:
1505 case WIFITX_MPDU_END_E:
1506 ;
1507 }
1508 if (*mpdu_idx < tlv_log->max_mpdu_idx) {
1509 (*mpdu_idx)++;
1510 } else {
1511 *mpdu_idx = *mpdu_idx - MAX_MPDU_TLV_NUM + 1;
1512 tlv_log->wrap_flag ^= 1;
1513 }
1514 } else if (ppdu_info->tx_tlv_info.tlv_category == CATEGORY_PPDU_END) {
1515 tlv_info[*ppdu_end_idx].tlv_tag = tlv_tag;
1516 switch (tlv_tag) {
1517 case WIFITX_LAST_MPDU_FETCHED_E:
1518 case WIFITX_LAST_MPDU_END_E:
1519 case WIFIPDG_TX_REQ_E:
1520 case WIFITX_FES_STATUS_START_PPDU_E:
1521 case WIFIPHYTX_PPDU_HEADER_INFO_REQUEST_E:
1522 case WIFIMACTX_L_SIG_A_E:
1523 case WIFITXPCU_PREAMBLE_DONE_E:
1524 case WIFIMACTX_USER_DESC_COMMON_E:
1525 case WIFIMACTX_SERVICE_E:
1526 case WIFITXDMA_STOP_REQUEST_E:
1527 case WIFITXPCU_USER_BUFFER_STATUS_E:
1528 case WIFITX_FES_STATUS_USER_PPDU_E:
1529 case WIFITX_MPDU_COUNT_TRANSFER_END_E:
1530 case WIFIRX_START_PARAM_E:
1531 case WIFITX_FES_STATUS_ACK_OR_BA_E:
1532 case WIFITX_FES_STATUS_USER_RESPONSE_E:
1533 case WIFITX_FES_STATUS_END_E:
1534 case WIFITX_FES_STATUS_PROT_E:
1535 case WIFIMACTX_PHY_DESC_E:
1536 case WIFIMACTX_HE_SIG_A_SU_E:
1537 ;
1538 }
1539 if (*ppdu_end_idx < tlv_log->max_ppdu_end_idx)
1540 (*ppdu_end_idx)++;
1541 }
1542 }
1543
1544 /**
1545 * dp_tx_mon_record_clear_buffer() - Clear the buffer to record next PPDU
1546 *
1547 * @mon_pdev_be : pointer to dp_mon_pdev_be
1548 *
1549  * Return: void
1550 */
1551 void dp_tx_mon_record_clear_buffer(struct dp_mon_pdev_be *mon_pdev_be)
1552 {
1553 struct dp_mon_tlv_logger *tlv_log = NULL;
1554 struct dp_tx_mon_tlv_info *tlv_info = NULL;
1555
1556 tlv_log = mon_pdev_be->tx_tlv_log;
1557 tlv_info = (struct dp_tx_mon_tlv_info *)tlv_log->buff;
1558 qdf_mem_zero(&tlv_info[tlv_log->ppdu_start_idx],
1559 MAX_TLVS_PER_PPDU *
1560 sizeof(struct dp_tx_mon_tlv_info));
1561 }
1562 #else
1563
1564 static
1565 void dp_tx_mon_record_index_update(struct dp_mon_pdev_be *mon_pdev_be)
1566 {
1567 }
1568
1569 static
1570 void dp_tx_mon_record_tlv(struct dp_mon_pdev_be *mon_pdev_be,
1571 struct hal_tx_ppdu_info *data_ppdu_info,
1572 struct hal_tx_ppdu_info *proto_ppdu_info)
1573 {
1574 }
1575
1576 static
1577 void dp_tx_mon_record_clear_buffer(struct dp_mon_pdev_be *mon_pdev_be)
1578 {
1579 }
1580 #endif
1581 /**
1582  * dp_tx_mon_process_tlv_2_0() - API to parse a PPDU worth of TLV information
1583 * @pdev: DP_PDEV handle
1584 * @mon_desc_list_ref: tx monitor descriptor list reference
1585 *
1586 * Return: status
1587 */
1588 static QDF_STATUS
1589 dp_tx_mon_process_tlv_2_0(struct dp_pdev *pdev,
1590 struct dp_tx_mon_desc_list *mon_desc_list_ref)
1591 {
1592 struct dp_mon_pdev *mon_pdev;
1593 struct dp_mon_pdev_be *mon_pdev_be;
1594 struct dp_pdev_tx_monitor_be *tx_mon_be;
1595 struct dp_tx_ppdu_info *tx_prot_ppdu_info = NULL;
1596 struct dp_tx_ppdu_info *tx_data_ppdu_info = NULL;
1597 struct hal_tx_status_info *tx_status_prot;
1598 struct hal_tx_status_info *tx_status_data;
1599 qdf_frag_t status_frag = NULL;
1600 uint32_t end_offset = 0;
1601 uint32_t tlv_status;
1602 uint32_t status = QDF_STATUS_SUCCESS;
1603 uint8_t *tx_tlv;
1604 uint8_t *tx_tlv_start;
1605 uint8_t num_users = 0;
1606 uint8_t cur_frag_q_idx;
1607 bool schedule_wrq = false;
1608
1609 /* sanity check */
1610 if (qdf_unlikely(!pdev))
1611 return QDF_STATUS_E_NOMEM;
1612
1613 mon_pdev = pdev->monitor_pdev;
1614 if (qdf_unlikely(!mon_pdev))
1615 return QDF_STATUS_E_NOMEM;
1616
1617 mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1618 if (qdf_unlikely(!mon_pdev_be))
1619 return QDF_STATUS_E_NOMEM;
1620
1621 tx_mon_be = &mon_pdev_be->tx_monitor_be;
1622 cur_frag_q_idx = tx_mon_be->cur_frag_q_idx;
1623
1624 tx_status_prot = &tx_mon_be->prot_status_info;
1625 tx_status_data = &tx_mon_be->data_status_info;
1626
1627 tx_prot_ppdu_info = dp_tx_mon_get_ppdu_info(pdev, TX_PROT_PPDU_INFO,
1628 1, tx_mon_be->be_ppdu_id);
1629
1630 if (!tx_prot_ppdu_info) {
1631 dp_mon_info("tx prot ppdu info alloc got failed!!");
1632 return QDF_STATUS_E_NOMEM;
1633 }
1634
1635 status_frag = tx_mon_be->frag_q_vec[cur_frag_q_idx].frag_buf;
1636 end_offset = tx_mon_be->frag_q_vec[cur_frag_q_idx].end_offset;
1637 tx_tlv = status_frag;
1638 dp_mon_debug("last_frag_q_idx: %d status_frag:%pK",
1639 tx_mon_be->last_frag_q_idx, status_frag);
1640
1641 	/* peek at the first TLV to get the number of users for this PPDU */
1642 tlv_status = hal_txmon_status_get_num_users(pdev->soc->hal_soc,
1643 tx_tlv, &num_users);
1644 if (tlv_status == HAL_MON_TX_STATUS_PPDU_NOT_DONE || !num_users) {
1645 dp_tx_mon_free_ppdu_info(tx_prot_ppdu_info, tx_mon_be);
1646 tx_mon_be->tx_prot_ppdu_info = NULL;
1647 dp_mon_err("window open with tlv_tag[0x%x] num_users[%d]!\n",
1648 hal_tx_status_get_tlv_tag(tx_tlv), num_users);
1649 return QDF_STATUS_E_INVAL;
1650 }
1651
1652 /* allocate tx_data_ppdu_info based on num_users */
1653 tx_data_ppdu_info = dp_tx_mon_get_ppdu_info(pdev, TX_DATA_PPDU_INFO,
1654 num_users,
1655 tx_mon_be->be_ppdu_id);
1656 if (!tx_data_ppdu_info) {
1657 dp_tx_mon_free_ppdu_info(tx_prot_ppdu_info, tx_mon_be);
1658 tx_mon_be->tx_prot_ppdu_info = NULL;
1659 dp_mon_info("tx prot ppdu info alloc got failed!!");
1660 return QDF_STATUS_E_NOMEM;
1661 }
1662
1663 /* iterate status buffer queue */
1664 while (tx_mon_be->cur_frag_q_idx < tx_mon_be->last_frag_q_idx) {
1665 /* get status buffer from frag_q_vec */
1666 status_frag = tx_mon_be->frag_q_vec[cur_frag_q_idx].frag_buf;
1667 end_offset = tx_mon_be->frag_q_vec[cur_frag_q_idx].end_offset;
1668 if (qdf_unlikely(!status_frag)) {
1669 dp_mon_err("status frag is NULL\n");
1670 QDF_BUG(0);
1671 }
1672
1673 tx_tlv = status_frag;
1674 tx_tlv_start = tx_tlv;
1675
1676 dp_tx_mon_record_clear_buffer(mon_pdev_be);
1677 /*
1678 * parse each status buffer and populate the information to
1679 * dp_tx_ppdu_info
1680 */
1681 do {
1682 tlv_status = hal_txmon_status_parse_tlv(
1683 pdev->soc->hal_soc,
1684 &tx_data_ppdu_info->hal_txmon,
1685 &tx_prot_ppdu_info->hal_txmon,
1686 tx_status_data,
1687 tx_status_prot,
1688 tx_tlv, status_frag);
1689
1690 dp_tx_mon_record_tlv(mon_pdev_be,
1691 &tx_data_ppdu_info->hal_txmon,
1692 &tx_prot_ppdu_info->hal_txmon);
1693
1694 status =
1695 dp_tx_mon_update_ppdu_info_status(
1696 pdev,
1697 tx_data_ppdu_info,
1698 tx_prot_ppdu_info,
1699 tx_tlv,
1700 status_frag,
1701 tlv_status,
1702 mon_desc_list_ref);
1703
1704 /* need api definition for hal_tx_status_get_next_tlv */
1705 tx_tlv = hal_tx_status_get_next_tlv(tx_tlv,
1706 mon_pdev->is_tlv_hdr_64_bit);
1707 if ((tx_tlv - tx_tlv_start) >= end_offset)
1708 break;
1709 } while ((tx_tlv - tx_tlv_start) < end_offset);
1710
1711 /*
1712 * free status buffer after parsing
1713 	 * if status_frag is mapped to an mpdu, make sure it is accounted for
1714 */
1715 tx_mon_be->stats.status_buf_free++;
1716 qdf_frag_free(status_frag);
1717 tx_mon_be->frag_q_vec[cur_frag_q_idx].frag_buf = NULL;
1718 tx_mon_be->frag_q_vec[cur_frag_q_idx].end_offset = 0;
1719 cur_frag_q_idx = ++tx_mon_be->cur_frag_q_idx;
1720
1721 dp_tx_mon_record_index_update(mon_pdev_be);
1722 }
1723
1724 /* clear the unreleased frag array */
1725 dp_tx_mon_status_queue_free(pdev, tx_mon_be, mon_desc_list_ref);
1726
	if (TXMON_PPDU_HAL(tx_prot_ppdu_info, is_used)) {
		if (qdf_unlikely(!TXMON_PPDU_COM(tx_prot_ppdu_info,
						 chan_num))) {
			/* update channel number, if not fetched properly */
			TXMON_PPDU_COM(tx_prot_ppdu_info,
				       chan_num) = mon_pdev->mon_chan_num;
		}

		if (qdf_unlikely(!TXMON_PPDU_COM(tx_prot_ppdu_info,
						 chan_freq))) {
			/* update channel frequency, if not fetched properly */
			TXMON_PPDU_COM(tx_prot_ppdu_info,
				       chan_freq) = mon_pdev->mon_chan_freq;
		}

		/*
		 * add dp_tx_ppdu_info to pdev queue
		 * for post processing
		 *
		 * TODO: add a threshold check and drop the ppdu info
		 */
		qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock);
		tx_mon_be->last_prot_ppdu_info =
					tx_mon_be->tx_prot_ppdu_info;
		STAILQ_INSERT_TAIL(&tx_mon_be->tx_ppdu_info_queue,
				   tx_prot_ppdu_info,
				   tx_ppdu_info_queue_elem);
		tx_mon_be->tx_ppdu_info_list_depth++;

		tx_mon_be->tx_prot_ppdu_info = NULL;
		qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock);
		schedule_wrq = true;
	} else {
		dp_tx_mon_free_ppdu_info(tx_prot_ppdu_info, tx_mon_be);
		tx_mon_be->tx_prot_ppdu_info = NULL;
		tx_prot_ppdu_info = NULL;
	}

	if (TXMON_PPDU_HAL(tx_data_ppdu_info, is_used)) {
		if (qdf_unlikely(!TXMON_PPDU_COM(tx_data_ppdu_info,
						 chan_num))) {
			/* update channel number, if not fetched properly */
			TXMON_PPDU_COM(tx_data_ppdu_info,
				       chan_num) = mon_pdev->mon_chan_num;
		}

		if (qdf_unlikely(!TXMON_PPDU_COM(tx_data_ppdu_info,
						 chan_freq))) {
			/* update channel frequency, if not fetched properly */
			TXMON_PPDU_COM(tx_data_ppdu_info,
				       chan_freq) = mon_pdev->mon_chan_freq;
		}

		/*
		 * add dp_tx_ppdu_info to pdev queue
		 * for post processing
		 *
		 * TODO: add a threshold check and drop the ppdu info
		 */
		qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock);
		tx_mon_be->last_data_ppdu_info =
					tx_mon_be->tx_data_ppdu_info;
		STAILQ_INSERT_TAIL(&tx_mon_be->tx_ppdu_info_queue,
				   tx_data_ppdu_info,
				   tx_ppdu_info_queue_elem);
		tx_mon_be->tx_ppdu_info_list_depth++;

		tx_mon_be->tx_data_ppdu_info = NULL;
		qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock);
		schedule_wrq = true;
	} else {
		dp_tx_mon_free_ppdu_info(tx_data_ppdu_info, tx_mon_be);
		tx_mon_be->tx_data_ppdu_info = NULL;
		tx_data_ppdu_info = NULL;
	}
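
	/*
	 * hand the queued ppdu_info entries over to the post-ppdu work
	 * queue for deferred processing outside this TLV parsing path
	 */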
	if (schedule_wrq)
		qdf_queue_work(NULL, tx_mon_be->post_ppdu_workqueue,
			       &tx_mon_be->post_ppdu_work);

	return QDF_STATUS_SUCCESS;
}
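
/*
 * record how the current PPDU window ended (flush detected, truncation,
 * end of PPDU, ...) as a bit in the per-pdev be_end_reason_bitmap
 */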
void dp_tx_mon_update_end_reason(struct dp_mon_pdev *mon_pdev,
				 int ppdu_id, int end_reason)
{
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;

	tx_mon_be->be_end_reason_bitmap |= (1 << end_reason);
}

QDF_STATUS
dp_tx_mon_process_status_tlv(struct dp_soc *soc,
			     struct dp_pdev *pdev,
			     struct hal_mon_desc *mon_ring_desc,
			     qdf_frag_t status_frag,
			     uint32_t end_offset,
			     struct dp_tx_mon_desc_list *mon_desc_list_ref)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be = NULL;
	uint8_t last_frag_q_idx = 0;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		goto free_status_buffer;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		goto free_status_buffer;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		goto free_status_buffer;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;

	if (qdf_unlikely(tx_mon_be->last_frag_q_idx >
			 MAX_STATUS_BUFFER_IN_PPDU)) {
		dp_mon_err("status frag queue for ppdu[%d] exceeds %d\n",
			   tx_mon_be->be_ppdu_id,
			   MAX_STATUS_BUFFER_IN_PPDU);
		dp_tx_mon_status_queue_free(pdev, tx_mon_be, mon_desc_list_ref);
		goto free_status_buffer;
	}
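
	/*
	 * nothing to build if tx monitor is disabled and lite monitor is
	 * not capturing tx; just release the queued status buffers
	 */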
	if (tx_mon_be->mode == TX_MON_BE_DISABLE &&
	    !dp_lite_mon_is_tx_enabled(mon_pdev)) {
		dp_tx_mon_status_queue_free(pdev, tx_mon_be,
					    mon_desc_list_ref);
		goto free_status_buffer;
	}
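
	/*
	 * a new ppdu_id while status buffers are still queued means the
	 * previous PPDU never completed; account for the drop based on
	 * the recorded end reason and release its status buffers
	 */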
	if (tx_mon_be->be_ppdu_id != mon_ring_desc->ppdu_id &&
	    tx_mon_be->last_frag_q_idx) {
		if (tx_mon_be->be_end_reason_bitmap &
		    (1 << HAL_MON_FLUSH_DETECTED)) {
			tx_mon_be->stats.ppdu_info_drop_flush++;
			dp_tx_mon_status_queue_free(pdev, tx_mon_be,
						    mon_desc_list_ref);
		} else if (tx_mon_be->be_end_reason_bitmap &
			   (1 << HAL_MON_PPDU_TRUNCATED)) {
			tx_mon_be->stats.ppdu_info_drop_trunc++;
			dp_tx_mon_status_queue_free(pdev, tx_mon_be,
						    mon_desc_list_ref);
		} else {
			dp_mon_err("End of ppdu not seen PID:%d cur_pid:%d idx:%d",
				   tx_mon_be->be_ppdu_id,
				   mon_ring_desc->ppdu_id,
				   tx_mon_be->last_frag_q_idx);
			/* drop the status buffers gathered for the incomplete ppdu */
			dp_tx_mon_status_queue_free(pdev, tx_mon_be,
						    mon_desc_list_ref);
		}

		/* reset end reason bitmap */
		tx_mon_be->be_end_reason_bitmap = 0;
		tx_mon_be->last_frag_q_idx = 0;
		tx_mon_be->cur_frag_q_idx = 0;
	}
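
	/*
	 * track the current ppdu_id and end reason and append this status
	 * buffer to the per-PPDU frag queue for later TLV parsing
	 */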
	tx_mon_be->be_ppdu_id = mon_ring_desc->ppdu_id;
	tx_mon_be->be_end_reason_bitmap |= (1 << mon_ring_desc->end_reason);

	last_frag_q_idx = tx_mon_be->last_frag_q_idx;

	tx_mon_be->frag_q_vec[last_frag_q_idx].frag_buf = status_frag;
	tx_mon_be->frag_q_vec[last_frag_q_idx].end_offset = end_offset;
	tx_mon_be->last_frag_q_idx++;
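
	/*
	 * END_OF_PPDU closes the PPDU window: parse all status buffers
	 * queued for this PPDU now, unless the pending ppdu_info lists
	 * are already above the drop threshold
	 */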
	if (mon_ring_desc->end_reason == HAL_MON_END_OF_PPDU) {
		/* drop processing of tlv, if ppdu info list exceeds threshold */
		if ((tx_mon_be->defer_ppdu_info_list_depth +
		     tx_mon_be->tx_ppdu_info_list_depth) >
		    MAX_PPDU_INFO_LIST_DEPTH) {
			tx_mon_be->stats.ppdu_info_drop_th++;
			dp_tx_mon_status_queue_free(pdev, tx_mon_be,
						    mon_desc_list_ref);
			return QDF_STATUS_E_PENDING;
		}

		if (dp_tx_mon_process_tlv_2_0(pdev,
					      mon_desc_list_ref) !=
		    QDF_STATUS_SUCCESS)
			dp_tx_mon_status_queue_free(pdev, tx_mon_be,
						    mon_desc_list_ref);
	}

	return QDF_STATUS_SUCCESS;
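
/*
 * error path: release any packet buffers referenced by this status buffer
 * and free the status buffer itself
 */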
free_status_buffer:
	dp_tx_mon_status_free_packet_buf(pdev, status_frag, end_offset,
					 mon_desc_list_ref);
	if (qdf_likely(tx_mon_be))
		tx_mon_be->stats.status_buf_free++;

	qdf_frag_free(status_frag);

	return QDF_STATUS_E_NOMEM;
}

#endif

#ifdef WLAN_TX_MON_CORE_DEBUG
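/*
 * debug build variant: no ppdu_info is built; the packet buffers carried
 * in the status buffer are reaped and the status buffer is freed right away
 */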
QDF_STATUS
dp_tx_mon_process_status_tlv(struct dp_soc *soc,
			     struct dp_pdev *pdev,
			     struct hal_mon_desc *mon_ring_desc,
			     qdf_frag_t status_frag,
			     uint32_t end_offset,
			     struct dp_tx_mon_desc_list *mon_desc_list_ref)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return QDF_STATUS_E_INVAL;

	mon_pdev = pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev))
		return QDF_STATUS_E_INVAL;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return QDF_STATUS_E_INVAL;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;

	dp_tx_mon_status_free_packet_buf(pdev, status_frag, end_offset,
					 mon_desc_list_ref);
	tx_mon_be->stats.status_buf_free++;
	qdf_frag_free(status_frag);

	return QDF_STATUS_E_INVAL;
}

void dp_tx_mon_update_end_reason(struct dp_mon_pdev *mon_pdev,
				 int ppdu_id, int end_reason)
{
}
#endif

#if defined(WLAN_TX_PKT_CAPTURE_ENH_BE) && defined(WLAN_PKT_CAPTURE_TX_2_0) && \
	defined(BE_PKTLOG_SUPPORT)
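/*
 * hybrid pktlog path: wrap the tx monitor status buffer into an nbuf frag
 * and deliver it through the WDI_EVENT_HYBRID_TX pktlog event
 */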
QDF_STATUS
dp_tx_process_pktlog_be(struct dp_soc *soc, struct dp_pdev *pdev,
			qdf_frag_t status_frag, uint32_t end_offset)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	qdf_nbuf_t nbuf = NULL;
	enum WDI_EVENT pktlog_mode = WDI_NO_VAL;
	int frag_bytes;

	if (!mon_pdev->pktlog_hybrid_mode)
		return QDF_STATUS_E_INVAL;

	nbuf = qdf_nbuf_alloc(soc->osdev, MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
	if (!nbuf)
		return QDF_STATUS_E_NOMEM;

	qdf_nbuf_add_rx_frag(status_frag, nbuf, 0,
			     (end_offset + 1),
			     0, true);

	if (mon_pdev->pktlog_hybrid_mode)
		pktlog_mode = WDI_EVENT_HYBRID_TX;

	frag_bytes = qdf_nbuf_get_frag_len(nbuf, 0);
	if (pktlog_mode != WDI_NO_VAL) {
		dp_wdi_event_handler(pktlog_mode, soc,
				     nbuf, HTT_INVALID_PEER,
				     WDI_NO_VAL, pdev->pdev_id);
	}
	qdf_nbuf_free(nbuf);

	return QDF_STATUS_SUCCESS;
}
#endif