1 /*
2 * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved.
3 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for
6 * any purpose with or without fee is hereby granted, provided that the
7 * above copyright notice and this permission notice appear in all
8 * copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #ifndef _OL_TXRX_INTERNAL__H_
21 #define _OL_TXRX_INTERNAL__H_
22
23 #include <qdf_util.h> /* qdf_assert */
24 #include <qdf_nbuf.h> /* qdf_nbuf_t */
25 #include <qdf_mem.h> /* qdf_mem_set */
26 #include <cds_ieee80211_common.h> /* ieee80211_frame */
27 #include <ol_htt_rx_api.h> /* htt_rx_msdu_desc_completes_mpdu, etc. */
28
29 #include <ol_txrx_types.h>
30
31 #include <ol_txrx_dbg.h>
32 #include <enet.h> /* ETHERNET_HDR_LEN, etc. */
33 #include <ipv4.h> /* IPV4_HDR_LEN, etc. */
34 #include <ip_prot.h> /* IP_PROTOCOL_TCP, etc. */
35
/*
 * Tx descriptor reference counting.
 * With ATH_11AC_TXCOMPACT each tx descriptor carries a single implicit
 * reference, so the ref-count ops collapse to no-ops and
 * OL_TX_DESC_NO_REFS is unconditionally true.  Otherwise an atomic
 * ref count inside the descriptor is used.
 */
#ifdef ATH_11AC_TXCOMPACT
#define OL_TX_DESC_NO_REFS(tx_desc) 1
#define OL_TX_DESC_REF_INIT(tx_desc)    /* no-op */
#define OL_TX_DESC_REF_INC(tx_desc)     /* no-op */
#else
/* true when this decrement drops the ref count to zero */
#define OL_TX_DESC_NO_REFS(tx_desc) \
	qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
#define OL_TX_DESC_REF_INIT(tx_desc) qdf_atomic_init(&tx_desc->ref_cnt)
#define OL_TX_DESC_REF_INC(tx_desc) qdf_atomic_inc(&tx_desc->ref_cnt)
#endif

/* default assertion verbosity: enables both TXRX_ASSERT1 and TXRX_ASSERT2 */
#ifndef TXRX_ASSERT_LEVEL
#define TXRX_ASSERT_LEVEL 3
#endif
50
/*
 * TXRX_ASSERT1 is active at assert level >= 1, TXRX_ASSERT2 at
 * level >= 2.  Under Klocwork static analysis both map to abort() so
 * the analyzer can assume the asserted condition holds afterwards.
 */
#ifdef __KLOCWORK__
#define TXRX_ASSERT1(x) do { if (!(x)) abort(); } while (0)
#define TXRX_ASSERT2(x) do { if (!(x)) abort(); } while (0)
#else /* #ifdef __KLOCWORK__ */

#if TXRX_ASSERT_LEVEL > 0
#define TXRX_ASSERT1(condition) qdf_assert((condition))
#else
#define TXRX_ASSERT1(condition)
#endif

#if TXRX_ASSERT_LEVEL > 1
#define TXRX_ASSERT2(condition) qdf_assert((condition))
#else
#define TXRX_ASSERT2(condition)
#endif
#endif /* #ifdef __KLOCWORK__ */
68
#ifdef TXRX_PRINT_ENABLE

#include "qdf_types.h"          /* qdf_vprint */

/* TXRX log wrappers, one per severity, all tagged QDF_MODULE_ID_TXRX */
#define ol_txrx_alert(params...) \
	QDF_TRACE_FATAL(QDF_MODULE_ID_TXRX, params)
#define ol_txrx_err(params...) \
	QDF_TRACE_ERROR(QDF_MODULE_ID_TXRX, params)
#define ol_txrx_warn(params...) \
	QDF_TRACE_WARN(QDF_MODULE_ID_TXRX, params)
#define ol_txrx_info(params...) \
	QDF_TRACE_INFO(QDF_MODULE_ID_TXRX, params)
#define ol_txrx_info_high(params...) \
	QDF_TRACE_INFO(QDF_MODULE_ID_TXRX, params)
#define ol_txrx_dbg(params...) \
	QDF_TRACE_DEBUG(QDF_MODULE_ID_TXRX, params)

/* "_NO_FL" variants: log without the file-and-line prefix */
#define txrx_nofl_alert(params...) \
	QDF_TRACE_FATAL_NO_FL(QDF_MODULE_ID_TXRX, params)
#define txrx_nofl_err(params...) \
	QDF_TRACE_ERROR_NO_FL(QDF_MODULE_ID_TXRX, params)
#define txrx_nofl_warn(params...) \
	QDF_TRACE_WARN_NO_FL(QDF_MODULE_ID_TXRX, params)
#define txrx_nofl_info(params...) \
	QDF_TRACE_INFO_NO_FL(QDF_MODULE_ID_TXRX, params)
#define txrx_nofl_dbg(params...) \
	QDF_TRACE_DEBUG_NO_FL(QDF_MODULE_ID_TXRX, params)

/* rate-limited error logging */
#define ol_txrx_err_rl(params...) \
	QDF_TRACE_ERROR_RL(QDF_MODULE_ID_TXRX, params)

/*
 * define PN check failure message print rate
 * as 1 second
 */
#define TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS 1000

#else
/* TXRX printing disabled: all wrappers compile away to nothing */

#define ol_txrx_alert(format, args...)
#define ol_txrx_err(format, args...)
#define ol_txrx_warn(format, args...)
#define ol_txrx_info(format, args...)
#define ol_txrx_info_high(format, args...)
#define ol_txrx_dbg(format, args...)

#define txrx_nofl_alert(params...)
#define txrx_nofl_err(params...)
#define txrx_nofl_warn(params...)
#define txrx_nofl_info(params...)
#define txrx_nofl_dbg(params...)

#define ol_txrx_err_rl(params...)

#endif /* TXRX_PRINT_ENABLE */
124
/*--- tx credit debug printouts ---*/

#ifndef DEBUG_CREDIT
#define DEBUG_CREDIT 0
#endif

#if DEBUG_CREDIT
#define TX_CREDIT_DEBUG_PRINT(fmt, ...) qdf_print(fmt, ## __VA_ARGS__)
#else
#define TX_CREDIT_DEBUG_PRINT(fmt, ...)
#endif

/*--- tx scheduler debug printouts ---*/

#ifdef HOST_TX_SCHED_DEBUG
#define TX_SCHED_DEBUG_PRINT(fmt, ...) qdf_print(fmt, ## __VA_ARGS__)
#else
#define TX_SCHED_DEBUG_PRINT(fmt, ...)
#endif
/* unconditional variant for messages that must always be emitted */
#define TX_SCHED_DEBUG_PRINT_ALWAYS(fmt, ...) qdf_print(fmt, ## __VA_ARGS__)

/*
 * Append elem to a singly-linked nbuf list tracked by head/tail.
 * head is set on the first element; otherwise elem is chained after
 * the current tail.  tail always ends up pointing at elem.
 */
#define OL_TXRX_LIST_APPEND(head, tail, elem) \
	do {						\
		if (!(head)) {				    \
			(head) = (elem);			\
		} else {				    \
			qdf_nbuf_set_next((tail), (elem));	\
		}					    \
		(tail) = (elem);				\
	} while (0)
155
156 static inline void
ol_rx_mpdu_list_next(struct ol_txrx_pdev_t * pdev,void * mpdu_list,qdf_nbuf_t * mpdu_tail,qdf_nbuf_t * next_mpdu)157 ol_rx_mpdu_list_next(struct ol_txrx_pdev_t *pdev,
158 void *mpdu_list,
159 qdf_nbuf_t *mpdu_tail, qdf_nbuf_t *next_mpdu)
160 {
161 htt_pdev_handle htt_pdev = pdev->htt_pdev;
162 qdf_nbuf_t msdu;
163
164 /*
165 * For now, we use a simply flat list of MSDUs.
166 * So, traverse the list until we reach the last MSDU within the MPDU.
167 */
168 TXRX_ASSERT2(mpdu_list);
169 msdu = mpdu_list;
170 while (!htt_rx_msdu_desc_completes_mpdu
171 (htt_pdev, htt_rx_msdu_desc_retrieve(htt_pdev, msdu))) {
172 if (!qdf_nbuf_next(msdu)) {
173 qdf_err("last-msdu bit not set!");
174 break;
175 } else {
176 msdu = qdf_nbuf_next(msdu);
177 }
178 TXRX_ASSERT2(msdu);
179 }
180 /* msdu now points to the last MSDU within the first MPDU */
181 *mpdu_tail = msdu;
182 *next_mpdu = qdf_nbuf_next(msdu);
183 }
184
/*--- txrx stats macros ---*/

/* unconditional defs */
#define TXRX_STATS_INCR(pdev, field) TXRX_STATS_ADD(pdev, field, 1)

/* default conditional defs (may be undefed below) */

/* zero the entire pdev stats structure */
#define TXRX_STATS_INIT(_pdev) \
	qdf_mem_zero(&((_pdev)->stats), sizeof((_pdev)->stats))
/*
 * Add _delta to the given pdev stats field.
 * Wrapped in do { } while (0) (rather than a bare compound statement)
 * so the expansion behaves as a single statement in every context,
 * including unbraced if/else arms.
 */
#define TXRX_STATS_ADD(_pdev, _field, _delta) \
	do { \
		(_pdev)->stats._field += (_delta); \
	} while (0)
/* bump both the packet count and the byte count for one MSDU */
#define TXRX_STATS_MSDU_INCR(pdev, field, netbuf) \
	do { \
		TXRX_STATS_INCR((pdev), pub.field.pkts); \
		TXRX_STATS_ADD((pdev), pub.field.bytes, qdf_nbuf_len(netbuf)); \
	} while (0)

/* conditional defs based on verbosity level */


/* apply TXRX_STATS_MSDU_INCR to every buffer in an nbuf list */
#define TXRX_STATS_MSDU_LIST_INCR(pdev, field, netbuf_list) \
	do { \
		qdf_nbuf_t tmp_list = netbuf_list; \
		while (tmp_list) { \
			TXRX_STATS_MSDU_INCR(pdev, field, tmp_list); \
			tmp_list = qdf_nbuf_next(tmp_list); \
		} \
	} while (0)

/* account one tx completion against the bucket matching its status */
#define TXRX_STATS_MSDU_INCR_TX_STATUS(status, pdev, netbuf) do { \
		if (status == htt_tx_status_ok) \
			TXRX_STATS_MSDU_INCR(pdev, tx.delivered, netbuf); \
		else if (status == htt_tx_status_discard) \
			TXRX_STATS_MSDU_INCR(pdev, tx.dropped.target_discard, \
					     netbuf); \
		else if (status == htt_tx_status_no_ack) \
			TXRX_STATS_MSDU_INCR(pdev, tx.dropped.no_ack, netbuf); \
		else if (status == htt_tx_status_drop) \
			TXRX_STATS_MSDU_INCR(pdev, tx.dropped.target_drop, \
					     netbuf); \
		else if (status == htt_tx_status_download_fail) \
			TXRX_STATS_MSDU_INCR(pdev, tx.dropped.download_fail, \
					     netbuf); \
		else \
			/* NO-OP */; \
	} while (0)
231
/*
 * Update the tx-completion histogram for a batch of _p_cntrs packets.
 * Buckets: 1, 2-10, 11-20, 21-30, 31-40, 41-50, 51-60, 61+.
 * Note: the 2-10 bucket tests "_p_cntrs > 1" (not "> 2") so that a
 * batch of exactly 2 lands in pkts_2_10 instead of falling through to
 * the pkts_61_plus catch-all.
 */
#define TXRX_STATS_UPDATE_TX_COMP_HISTOGRAM(_pdev, _p_cntrs) \
	do { \
		if (_p_cntrs == 1) { \
			TXRX_STATS_ADD(_pdev, pub.tx.comp_histogram.pkts_1, 1);\
		} else if (_p_cntrs > 1 && _p_cntrs <= 10) { \
			TXRX_STATS_ADD(_pdev, \
				       pub.tx.comp_histogram.pkts_2_10, 1); \
		} else if (_p_cntrs > 10 && _p_cntrs <= 20) { \
			TXRX_STATS_ADD(_pdev, \
				       pub.tx.comp_histogram.pkts_11_20, 1); \
		} else if (_p_cntrs > 20 && _p_cntrs <= 30) { \
			TXRX_STATS_ADD(_pdev, \
				       pub.tx.comp_histogram.pkts_21_30, 1); \
		} else if (_p_cntrs > 30 && _p_cntrs <= 40) { \
			TXRX_STATS_ADD(_pdev, \
				       pub.tx.comp_histogram.pkts_31_40, 1); \
		} else if (_p_cntrs > 40 && _p_cntrs <= 50) { \
			TXRX_STATS_ADD(_pdev, \
				       pub.tx.comp_histogram.pkts_41_50, 1); \
		} else if (_p_cntrs > 50 && _p_cntrs <= 60) { \
			TXRX_STATS_ADD(_pdev, \
				       pub.tx.comp_histogram.pkts_51_60, 1); \
		} else { \
			TXRX_STATS_ADD(_pdev, \
				       pub.tx.comp_histogram.pkts_61_plus, 1); \
		} \
	} while (0)
259
260 #define TXRX_STATS_UPDATE_TX_STATS(_pdev, _status, _p_cntrs, _b_cntrs) \
261 do { \
262 switch (status) { \
263 case htt_tx_status_ok: \
264 TXRX_STATS_ADD(_pdev, \
265 pub.tx.delivered.pkts, _p_cntrs); \
266 TXRX_STATS_ADD(_pdev, \
267 pub.tx.delivered.bytes, _b_cntrs); \
268 break; \
269 case htt_tx_status_discard: \
270 TXRX_STATS_ADD(_pdev, \
271 pub.tx.dropped.target_discard.pkts, _p_cntrs);\
272 TXRX_STATS_ADD(_pdev, \
273 pub.tx.dropped.target_discard.bytes, _b_cntrs);\
274 break; \
275 case htt_tx_status_no_ack: \
276 TXRX_STATS_ADD(_pdev, pub.tx.dropped.no_ack.pkts, \
277 _p_cntrs); \
278 TXRX_STATS_ADD(_pdev, pub.tx.dropped.no_ack.bytes, \
279 _b_cntrs); \
280 break; \
281 case htt_tx_status_drop: \
282 TXRX_STATS_ADD(_pdev, \
283 pub.tx.dropped.target_drop.pkts, _p_cntrs);\
284 TXRX_STATS_ADD(_pdev, \
285 pub.tx.dropped.target_drop.bytes, _b_cntrs);\
286 break; \
287 case htt_tx_status_download_fail: \
288 TXRX_STATS_ADD(_pdev, \
289 pub.tx.dropped.download_fail.pkts, _p_cntrs); \
290 TXRX_STATS_ADD(_pdev, \
291 pub.tx.dropped.download_fail.bytes, _b_cntrs);\
292 break; \
293 default: \
294 TXRX_STATS_ADD(_pdev, \
295 pub.tx.dropped.others.pkts, _p_cntrs); \
296 TXRX_STATS_ADD(_pdev, \
297 pub.tx.dropped.others.bytes, _b_cntrs); \
298 break; \
299 } \
300 TXRX_STATS_UPDATE_TX_COMP_HISTOGRAM(_pdev, _p_cntrs); \
301 } while (0)
302
303
/*--- txrx sequence number trace macros ---*/

/* encode an error status into the top of the 16-bit sequence space */
#define TXRX_SEQ_NUM_ERR(_status) (0xffff - _status)

#if defined(ENABLE_RX_REORDER_TRACE)

A_STATUS ol_rx_reorder_trace_attach(ol_txrx_pdev_handle pdev);
void ol_rx_reorder_trace_detach(ol_txrx_pdev_handle pdev);
void ol_rx_reorder_trace_add(ol_txrx_pdev_handle pdev,
			     uint8_t tid,
			     uint16_t reorder_idx,
			     uint16_t seq_num, int num_mpdus);

#define OL_RX_REORDER_TRACE_ATTACH ol_rx_reorder_trace_attach
#define OL_RX_REORDER_TRACE_DETACH ol_rx_reorder_trace_detach
#define OL_RX_REORDER_TRACE_ADD ol_rx_reorder_trace_add

#else
/* tracing disabled: attach reports success, the other hooks vanish */

#define OL_RX_REORDER_TRACE_ATTACH(_pdev) A_OK
#define OL_RX_REORDER_TRACE_DETACH(_pdev)
#define OL_RX_REORDER_TRACE_ADD(pdev, tid, reorder_idx, seq_num, num_mpdus)

#endif /* ENABLE_RX_REORDER_TRACE */

/*--- txrx packet number trace macros ---*/

#if defined(ENABLE_RX_PN_TRACE)

A_STATUS ol_rx_pn_trace_attach(ol_txrx_pdev_handle pdev);
void ol_rx_pn_trace_detach(ol_txrx_pdev_handle pdev);
void ol_rx_pn_trace_add(struct ol_txrx_pdev_t *pdev,
			struct ol_txrx_peer_t *peer,
			uint16_t tid, void *rx_desc);

#define OL_RX_PN_TRACE_ATTACH ol_rx_pn_trace_attach
#define OL_RX_PN_TRACE_DETACH ol_rx_pn_trace_detach
#define OL_RX_PN_TRACE_ADD ol_rx_pn_trace_add

#else
/* tracing disabled: attach reports success, the other hooks vanish */

#define OL_RX_PN_TRACE_ATTACH(_pdev) A_OK
#define OL_RX_PN_TRACE_DETACH(_pdev)
#define OL_RX_PN_TRACE_ADD(pdev, peer, tid, rx_desc)

#endif /* ENABLE_RX_PN_TRACE */
350
ol_txrx_ieee80211_hdrsize(const void * data)351 static inline int ol_txrx_ieee80211_hdrsize(const void *data)
352 {
353 const struct ieee80211_frame *wh = (const struct ieee80211_frame *)data;
354 int size = sizeof(struct ieee80211_frame);
355
356 /* NB: we don't handle control frames */
357 TXRX_ASSERT1((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
358 IEEE80211_FC0_TYPE_CTL);
359 if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
360 IEEE80211_FC1_DIR_DSTODS)
361 size += QDF_MAC_ADDR_SIZE;
362 if (IEEE80211_QOS_HAS_SEQ(wh)) {
363 size += sizeof(uint16_t);
364 /* Qos frame with Order bit set indicates an HTC frame */
365 if (wh->i_fc[1] & IEEE80211_FC1_ORDER)
366 size += sizeof(struct ieee80211_htc);
367 }
368 return size;
369 }
370
/*--- frame display utility ---*/

/* bit-flags selecting what ol_txrx_frms_dump() prints for each frame */
enum ol_txrx_frm_dump_options {
	ol_txrx_frm_dump_contents = 0x1,  /* hex dump of the frame bytes */
	ol_txrx_frm_dump_tcp_seq = 0x2,   /* decode and log TCP seq number */
};
377
#ifdef TXRX_DEBUG_DATA
/**
 * ol_txrx_frms_dump() - log diagnostic info for a list of frames
 * @name: optional label printed before the dump (may be NULL)
 * @pdev: physical device, consulted for the frame format
 * @frm: head of a netbuf list; every buffer in the list is processed
 * @display_options: bitmask of ol_txrx_frm_dump_options
 * @max_len: max number of payload bytes to hex-dump per frame
 *
 * Depending on display_options, decode and print each frame's TCP
 * sequence number (802.3 or 802.11 + LLC/SNAP, IPv4 or IPv6) and/or
 * hex-dump the first bytes of the frame contents.
 */
static inline void
ol_txrx_frms_dump(const char *name,
		  struct ol_txrx_pdev_t *pdev,
		  qdf_nbuf_t frm,
		  enum ol_txrx_frm_dump_options display_options, int max_len)
{
#define TXRX_FRM_DUMP_MAX_LEN 128
	uint8_t local_buf[TXRX_FRM_DUMP_MAX_LEN] = { 0 };
	uint8_t *p;

	if (name) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, "%s\n",
			  name);
	}
	while (frm) {
		p = qdf_nbuf_data(frm);
		if (display_options & ol_txrx_frm_dump_tcp_seq) {
			int tcp_offset;
			int l2_hdr_size;
			uint16_t ethtype;
			uint8_t ip_prot;

			if (pdev->frame_format == wlan_frm_fmt_802_3) {
				struct ethernet_hdr_t *enet_hdr =
					(struct ethernet_hdr_t *)p;
				l2_hdr_size = ETHERNET_HDR_LEN;

				/*
				 * LLC/SNAP present?
				 * (fixed: was IS_ETHERTYPE(ethertype),
				 * an undeclared identifier)
				 */
				ethtype = (enet_hdr->ethertype[0] << 8) |
					enet_hdr->ethertype[1];
				if (!IS_ETHERTYPE(ethtype)) {
					/* 802.3 format */
					struct llc_snap_hdr_t *llc_hdr;

					llc_hdr = (struct llc_snap_hdr_t *)
						(p + l2_hdr_size);
					l2_hdr_size += LLC_SNAP_HDR_LEN;
					ethtype = (llc_hdr->ethertype[0] << 8) |
						llc_hdr->ethertype[1];
				}
			} else {
				struct llc_snap_hdr_t *llc_hdr;

				/* (generic?) 802.11 */
				l2_hdr_size = sizeof(struct ieee80211_frame);
				llc_hdr = (struct llc_snap_hdr_t *)
					(p + l2_hdr_size);
				l2_hdr_size += LLC_SNAP_HDR_LEN;
				ethtype = (llc_hdr->ethertype[0] << 8) |
					llc_hdr->ethertype[1];
			}
			if (ethtype == ETHERTYPE_IPV4) {
				struct ipv4_hdr_t *ipv4_hdr;

				ipv4_hdr =
					(struct ipv4_hdr_t *)(p + l2_hdr_size);
				ip_prot = ipv4_hdr->protocol;
				tcp_offset = l2_hdr_size + IPV4_HDR_LEN;
			} else if (ethtype == ETHERTYPE_IPV6) {
				struct ipv6_hdr_t *ipv6_hdr;

				ipv6_hdr =
					(struct ipv6_hdr_t *)(p + l2_hdr_size);
				ip_prot = ipv6_hdr->next_hdr;
				tcp_offset = l2_hdr_size + IPV6_HDR_LEN;
			} else {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_INFO,
					  "frame %pK non-IP ethertype (%x)\n",
					  frm, ethtype);
				goto NOT_IP_TCP;
			}
			if (ip_prot == IP_PROTOCOL_TCP) {
#if NEVERDEFINED
				struct tcp_hdr_t *tcp_hdr;
				uint32_t tcp_seq_num;

				tcp_hdr = (struct tcp_hdr_t *)(p + tcp_offset);
				/*
				 * big-endian seq number, bytes 0..3
				 * (fixed: byte 1 was used three times)
				 */
				tcp_seq_num =
					(tcp_hdr->seq_num[0] << 24) |
					(tcp_hdr->seq_num[1] << 16) |
					(tcp_hdr->seq_num[2] << 8) |
					(tcp_hdr->seq_num[3] << 0);
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_INFO,
					  "frame %pK: TCP seq num = %d\n", frm,
					  tcp_seq_num);
#else
				/* TCP seq number: bytes 4-7 of the header */
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_INFO,
					  "frame %pK: TCP seq num = %d\n", frm,
					  ((*(p + tcp_offset + 4)) << 24) |
					  ((*(p + tcp_offset + 5)) << 16) |
					  ((*(p + tcp_offset + 6)) << 8) |
					  (*(p + tcp_offset + 7)));
#endif
			} else {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_INFO,
					  "frame %pK non-TCP IP protocol (%x)\n",
					  frm, ip_prot);
			}
		}
NOT_IP_TCP:
		if (display_options & ol_txrx_frm_dump_contents) {
			int i, frag_num, len_lim;

			len_lim = max_len;
			if (len_lim > qdf_nbuf_len(frm))
				len_lim = qdf_nbuf_len(frm);
			if (len_lim > TXRX_FRM_DUMP_MAX_LEN)
				len_lim = TXRX_FRM_DUMP_MAX_LEN;

			/*
			 * Gather frame contents from netbuf fragments
			 * into a contiguous buffer.
			 */
			frag_num = 0;
			i = 0;
			while (i < len_lim) {
				int frag_bytes;

				frag_bytes =
					qdf_nbuf_get_frag_len(frm, frag_num);
				if (frag_bytes > len_lim - i)
					frag_bytes = len_lim - i;
				if (frag_bytes > 0) {
					p = qdf_nbuf_get_frag_vaddr(frm,
								    frag_num);
					qdf_mem_copy(&local_buf[i], p,
						     frag_bytes);
				}
				frag_num++;
				i += frag_bytes;
			}

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  "frame %pK data (%pK), hex dump of bytes 0-%d of %d:\n",
				  frm, p, len_lim - 1, (int)qdf_nbuf_len(frm));
			p = local_buf;
			while (len_lim > 16) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_INFO,
					  " " /* indent */
					  "%02x %02x %02x %02x %02x %02x %02x %02x "
					  "%02x %02x %02x %02x %02x %02x %02x %02x\n",
					  *(p + 0), *(p + 1), *(p + 2),
					  *(p + 3), *(p + 4), *(p + 5),
					  *(p + 6), *(p + 7), *(p + 8),
					  *(p + 9), *(p + 10), *(p + 11),
					  *(p + 12), *(p + 13), *(p + 14),
					  *(p + 15));
				p += 16;
				len_lim -= 16;
			}
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  " " /* indent */);
			while (len_lim > 0) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_INFO, "%02x ", *p);
				p++;
				len_lim--;
			}
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  "\n");
		}
		frm = qdf_nbuf_next(frm);
	}
}
#else
#define ol_txrx_frms_dump(name, pdev, frms, display_options, max_len)
#endif /* TXRX_DEBUG_DATA */
553
554 #ifdef SUPPORT_HOST_STATISTICS
555
/* forward one rx error event to the control-path statistics handler */
#define OL_RX_ERR_STATISTICS(pdev, vdev, err_type, sec_type, is_mcast) \
	ol_rx_err_statistics(pdev->ctrl_pdev, vdev->vdev_id, err_type, \
			     sec_type, is_mcast)

/*
 * Derive mcast/ucast and the peer's security type for the given rx
 * descriptor, then report the error via OL_RX_ERR_STATISTICS.
 */
#define OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, rx_desc, err_type) \
	do { \
		int is_mcast; \
		enum htt_sec_type sec_type; \
		is_mcast = htt_rx_msdu_is_wlan_mcast( \
			pdev->htt_pdev, rx_desc); \
		sec_type = peer->security[is_mcast \
					  ? txrx_sec_mcast \
					  : txrx_sec_ucast].sec_type; \
		OL_RX_ERR_STATISTICS(pdev, vdev, err_type, \
				     pdev->sec_types[sec_type], \
				     is_mcast); \
	} while (false)
573
574 #ifdef CONFIG_HL_SUPPORT
575
576 /**
577 * ol_rx_err_inv_get_wifi_header() - retrieve wifi header
578 * @pdev: handle to the physical device
579 * @rx_msdu: msdu of which header needs to be retrieved
580 *
581 * Return: wifi header
582 */
583 static inline
ol_rx_err_inv_get_wifi_header(struct ol_pdev_t * pdev,qdf_nbuf_t rx_msdu)584 struct ieee80211_frame *ol_rx_err_inv_get_wifi_header(
585 struct ol_pdev_t *pdev, qdf_nbuf_t rx_msdu)
586 {
587 return NULL;
588 }
589 #else
590
591 static inline
ol_rx_err_inv_get_wifi_header(struct ol_pdev_t * pdev,qdf_nbuf_t rx_msdu)592 struct ieee80211_frame *ol_rx_err_inv_get_wifi_header(
593 struct ol_pdev_t *pdev, qdf_nbuf_t rx_msdu)
594 {
595 struct ieee80211_frame *wh = NULL;
596
597 if (ol_cfg_frame_type(pdev) == wlan_frm_fmt_native_wifi)
598 /* For windows, it is always native wifi header .*/
599 wh = (struct ieee80211_frame *)qdf_nbuf_data(rx_msdu);
600
601 return wh;
602 }
603 #endif
604
/*
 * Report an rx error for a frame whose peer could not be identified.
 * The wifi header (available on LL devices only) gives the control
 * path something to log against the unknown peer.
 */
#define OL_RX_ERR_INV_PEER_STATISTICS(pdev, rx_msdu) \
	do { \
		struct ieee80211_frame *wh = NULL; \
		/*FIX THIS : */ \
		/* Here htt_rx_mpdu_wifi_hdr_retrieve should be used. */ \
		/*But at present it seems it does not work.*/ \
		/*wh = (struct ieee80211_frame *) */ \
		/*htt_rx_mpdu_wifi_hdr_retrieve(pdev->htt_pdev, rx_desc);*/ \
		/* this only apply to LL device.*/ \
		wh = ol_rx_err_inv_get_wifi_header(pdev->ctrl_pdev, rx_msdu); \
		ol_rx_err_inv_peer_statistics(pdev->ctrl_pdev, \
					      wh, OL_RX_ERR_UNKNOWN_PEER); \
	} while (false)
618
/*
 * Map an htt rx status to an ol_rx_err_type and record it: against the
 * peer when vdev/peer are known, otherwise as an unknown-peer error.
 * Note: the peer path now forwards the macro's own rx_desc parameter;
 * it previously referenced a caller-scope variable named rx_mpdu_desc,
 * which only compiled when the caller happened to use that exact name.
 */
#define OL_RX_ERR_STATISTICS_2(pdev, vdev, peer, rx_desc, rx_msdu, rx_status) \
	do { \
		enum ol_rx_err_type err_type = OL_RX_ERR_NONE; \
		if (rx_status == htt_rx_status_decrypt_err) \
			err_type = OL_RX_ERR_DECRYPT; \
		else if (rx_status == htt_rx_status_tkip_mic_err) \
			err_type = OL_RX_ERR_TKIP_MIC; \
		else if (rx_status == htt_rx_status_mpdu_length_err) \
			err_type = OL_RX_ERR_MPDU_LENGTH; \
		else if (rx_status == htt_rx_status_mpdu_encrypt_required_err) \
			err_type = OL_RX_ERR_ENCRYPT_REQUIRED; \
		else if (rx_status == htt_rx_status_err_dup) \
			err_type = OL_RX_ERR_DUP; \
		else if (rx_status == htt_rx_status_err_fcs) \
			err_type = OL_RX_ERR_FCS; \
		else \
			err_type = OL_RX_ERR_UNKNOWN; \
		\
		if (vdev && peer) { \
			OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, \
					       rx_desc, err_type); \
		} else { \
			OL_RX_ERR_INV_PEER_STATISTICS(pdev, rx_msdu); \
		} \
	} while (false)
644 #else
645 #define OL_RX_ERR_STATISTICS(pdev, vdev, err_type, sec_type, is_mcast)
646 #define OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, rx_desc, err_type)
647 #define OL_RX_ERR_STATISTICS_2(pdev, vdev, peer, rx_desc, rx_msdu, rx_status)
648 #endif /* SUPPORT_HOST_STATISTICS */
649
#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
/*
 * Per-peer tx/rx counters, guarded by the pdev peer_stat_mutex.
 * type is one of ucast/mcast/bcast; both the frame count and the byte
 * count for that traffic class are bumped.
 */
#define OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, type, msdu) \
	do { \
		qdf_spin_lock_bh(&peer->vdev->pdev->peer_stat_mutex); \
		peer->stats.tx_or_rx.frms.type += 1; \
		peer->stats.tx_or_rx.bytes.type += qdf_nbuf_len(msdu); \
		qdf_spin_unlock_bh(&peer->vdev->pdev->peer_stat_mutex); \
	} while (0)
/*
 * Classify the msdu destination as bcast/mcast/ucast (taken from the
 * 802.3 header, or from 802.11 addr1/addr3 depending on opmode) and
 * update the matching per-peer counters.
 */
#define OL_TXRX_PEER_STATS_UPDATE(peer, tx_or_rx, msdu) \
	do { \
		struct ol_txrx_vdev_t *vdev = peer->vdev; \
		struct ol_txrx_pdev_t *pdev = vdev->pdev; \
		uint8_t *dest_addr; \
		if (pdev->frame_format == wlan_frm_fmt_802_3) { \
			dest_addr = qdf_nbuf_data(msdu); \
		} else { /* 802.11 format */ \
			struct ieee80211_frame *frm; \
			frm = (struct ieee80211_frame *) qdf_nbuf_data(msdu); \
			if (vdev->opmode == wlan_op_mode_ap) { \
				dest_addr = (uint8_t *) &(frm->i_addr1[0]); \
			} else { \
				dest_addr = (uint8_t *) &(frm->i_addr3[0]); \
			} \
		} \
		if (qdf_unlikely(QDF_IS_ADDR_BROADCAST(dest_addr))) { \
			OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, \
						       bcast, msdu); \
		} else if (qdf_unlikely(IEEE80211_IS_MULTICAST(dest_addr))) { \
			OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, \
						       mcast, msdu); \
		} else { \
			OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, \
						       ucast, msdu); \
		} \
	} while (0)
#define OL_TX_PEER_STATS_UPDATE(peer, msdu) \
	OL_TXRX_PEER_STATS_UPDATE(peer, tx, msdu)
#define OL_RX_PEER_STATS_UPDATE(peer, msdu) \
	OL_TXRX_PEER_STATS_UPDATE(peer, rx, msdu)
#define OL_TXRX_PEER_STATS_MUTEX_INIT(pdev) \
	qdf_spinlock_create(&pdev->peer_stat_mutex)
#define OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev) \
	qdf_spinlock_destroy(&pdev->peer_stat_mutex)
#else
/* per-peer stats disabled: all hooks compile away */
#define OL_TX_PEER_STATS_UPDATE(peer, msdu)     /* no-op */
#define OL_RX_PEER_STATS_UPDATE(peer, msdu)     /* no-op */
#define OL_TXRX_PEER_STATS_MUTEX_INIT(peer)     /* no-op */
#define OL_TXRX_PEER_STATS_MUTEX_DESTROY(peer)  /* no-op */
#endif
699
/* default: HTT credit debug tracing disabled */
#ifndef DEBUG_HTT_CREDIT
#define DEBUG_HTT_CREDIT 0
#endif
703
#if defined(FEATURE_TSO_DEBUG)
/*
 * TSO debug stats: a histogram of segments per TSO msdu plus a ring of
 * per-msdu / per-segment records under pdev->stats.pub.tx.tso.
 */
#define TXRX_STATS_TSO_HISTOGRAM(_pdev, _p_cntrs) \
	do { \
		if (_p_cntrs == 1) { \
			TXRX_STATS_ADD(_pdev, pub.tx.tso.tso_hist.pkts_1, 1); \
		} else if (_p_cntrs >= 2 && _p_cntrs <= 5) { \
			TXRX_STATS_ADD(_pdev, \
				       pub.tx.tso.tso_hist.pkts_2_5, 1); \
		} else if (_p_cntrs > 5 && _p_cntrs <= 10) { \
			TXRX_STATS_ADD(_pdev, \
				       pub.tx.tso.tso_hist.pkts_6_10, 1); \
		} else if (_p_cntrs > 10 && _p_cntrs <= 15) { \
			TXRX_STATS_ADD(_pdev, \
				       pub.tx.tso.tso_hist.pkts_11_15, 1); \
		} else if (_p_cntrs > 15 && _p_cntrs <= 20) { \
			TXRX_STATS_ADD(_pdev, \
				       pub.tx.tso.tso_hist.pkts_16_20, 1); \
		} else if (_p_cntrs > 20) { \
			TXRX_STATS_ADD(_pdev, \
				       pub.tx.tso.tso_hist.pkts_20_plus, 1); \
		} \
	} while (0)

/* clear the segment bookkeeping of one recorded msdu */
#define TXRX_STATS_TSO_RESET_MSDU(pdev, idx) \
	do { \
		pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].num_seg \
			= 0; \
		pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].tso_seg_idx \
			= 0; \
	} while (0)

/* accessors for the recorded msdu ring and its per-msdu fields */
#define TXRX_STATS_TSO_MSDU_IDX(pdev) \
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_idx

#define TXRX_STATS_TSO_MSDU(pdev, idx) \
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx]

#define TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, idx) \
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].num_seg

#define TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, idx) \
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].gso_size

#define TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, idx) \
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].total_len

#define TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, idx) \
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].nr_frags

#define TXRX_STATS_TSO_CURR_MSDU(pdev, idx) \
	TXRX_STATS_TSO_MSDU(pdev, idx)

#define TXRX_STATS_TSO_SEG_IDX(pdev, idx) \
	TXRX_STATS_TSO_CURR_MSDU(pdev, idx).tso_seg_idx

/* bump the segment count, wrapping via NUM_MAX_TSO_SEGS_MASK */
#define TXRX_STATS_TSO_INC_SEG(pdev, idx) \
	do { \
		TXRX_STATS_TSO_CURR_MSDU(pdev, idx).num_seg++; \
		TXRX_STATS_TSO_CURR_MSDU(pdev, idx).num_seg &= \
			NUM_MAX_TSO_SEGS_MASK; \
	} while (0)

#define TXRX_STATS_TSO_RST_SEG(pdev, idx) \
	TXRX_STATS_TSO_CURR_MSDU(pdev, idx).num_seg = 0

#define TXRX_STATS_TSO_RST_SEG_IDX(pdev, idx) \
	TXRX_STATS_TSO_CURR_MSDU(pdev, idx).tso_seg_idx = 0

#define TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx) \
	TXRX_STATS_TSO_MSDU(pdev, msdu_idx).tso_segs[seg_idx]

#define TXRX_STATS_TSO_CURR_SEG(pdev, idx) \
	TXRX_STATS_TSO_SEG(pdev, idx, \
			   TXRX_STATS_TSO_SEG_IDX(pdev, idx)) \

/* advance the segment index, wrapping via NUM_MAX_TSO_SEGS_MASK */
#define TXRX_STATS_TSO_INC_SEG_IDX(pdev, idx) \
	do { \
		TXRX_STATS_TSO_SEG_IDX(pdev, idx)++; \
		TXRX_STATS_TSO_SEG_IDX(pdev, idx) &= NUM_MAX_TSO_SEGS_MASK; \
	} while (0)

#define TXRX_STATS_TSO_SEG_UPDATE(pdev, idx, tso_seg) \
	(TXRX_STATS_TSO_CURR_SEG(pdev, idx) = tso_seg)

#define TXRX_STATS_TSO_GSO_SIZE_UPDATE(pdev, idx, size) \
	(TXRX_STATS_TSO_CURR_MSDU(pdev, idx).gso_size = size)

#define TXRX_STATS_TSO_TOTAL_LEN_UPDATE(pdev, idx, len) \
	(TXRX_STATS_TSO_CURR_MSDU(pdev, idx).total_len = len)

#define TXRX_STATS_TSO_NUM_FRAGS_UPDATE(pdev, idx, frags) \
	(TXRX_STATS_TSO_CURR_MSDU(pdev, idx).nr_frags = frags)

#else
/*
 * TSO debug disabled: all hooks compile away.
 * NOTE(review): TXRX_STATS_TSO_INC_MSDU_IDX is stubbed here but has no
 * counterpart in the enabled branch above — confirm no caller invokes
 * it with FEATURE_TSO_DEBUG defined.
 */
#define TXRX_STATS_TSO_HISTOGRAM(_pdev, _p_cntrs)       /* no-op */
#define TXRX_STATS_TSO_RESET_MSDU(pdev, idx)            /* no-op */
#define TXRX_STATS_TSO_MSDU_IDX(pdev)                   /* no-op */
#define TXRX_STATS_TSO_MSDU(pdev, idx)                  /* no-op */
#define TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, idx)          /* no-op */
#define TXRX_STATS_TSO_CURR_MSDU(pdev, idx)             /* no-op */
#define TXRX_STATS_TSO_INC_MSDU_IDX(pdev)               /* no-op */
#define TXRX_STATS_TSO_SEG_IDX(pdev, idx)               /* no-op */
#define TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx)     /* no-op */
#define TXRX_STATS_TSO_CURR_SEG(pdev, idx)              /* no-op */
#define TXRX_STATS_TSO_INC_SEG_IDX(pdev, idx)           /* no-op */
#define TXRX_STATS_TSO_SEG_UPDATE(pdev, idx, tso_seg)   /* no-op */
#define TXRX_STATS_TSO_INC_SEG(pdev, idx)               /* no-op */
#define TXRX_STATS_TSO_RST_SEG(pdev, idx)               /* no-op */
#define TXRX_STATS_TSO_RST_SEG_IDX(pdev, idx)           /* no-op */
#define TXRX_STATS_TSO_GSO_SIZE_UPDATE(pdev, idx, size) /* no-op */
#define TXRX_STATS_TSO_TOTAL_LEN_UPDATE(pdev, idx, len) /* no-op */
#define TXRX_STATS_TSO_NUM_FRAGS_UPDATE(pdev, idx, frags) /* no-op */
#define TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, idx)         /* no-op */
#define TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, idx)        /* no-op */
#define TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, idx)         /* no-op */

#endif /* FEATURE_TSO_DEBUG */
821
#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL

/*
 * Adjust a tx queue group's credit by 'credit'; when 'absolute' is
 * set, 'credit' replaces the group's credit instead of adding to it.
 */
void
ol_txrx_update_group_credit(
		struct ol_tx_queue_group_t *group,
		int32_t credit,
		u_int8_t absolute);
#endif
830
831 #endif /* _OL_TXRX_INTERNAL__H_ */
832