1 /*
2 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for
6 * any purpose with or without fee is hereby granted, provided that the
7 * above copyright notice and this permission notice appear in all
8 * copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /**
21 * DOC: qdf_nbuf.c
22  * QCA driver framework (QDF) network buffer management APIs
23 */
24 #include <linux/hashtable.h>
25 #include <linux/kernel.h>
26 #include <linux/version.h>
27 #include <linux/skbuff.h>
28 #include <linux/module.h>
29 #include <linux/proc_fs.h>
30 #include <linux/inetdevice.h>
31 #include <qdf_atomic.h>
32 #include <qdf_debugfs.h>
33 #include <qdf_lock.h>
34 #include <qdf_mem.h>
35 #include <qdf_module.h>
36 #include <qdf_nbuf.h>
37 #include <qdf_status.h>
38 #include "qdf_str.h"
39 #include <qdf_trace.h>
40 #include "qdf_tracker.h"
41 #include <qdf_types.h>
42 #include <net/ieee80211_radiotap.h>
43 #include <pld_common.h>
44 #include <qdf_crypto.h>
45 #include <linux/igmp.h>
46 #include <net/mld.h>
47
48 #if defined(FEATURE_TSO)
49 #include <net/ipv6.h>
50 #include <linux/ipv6.h>
51 #include <linux/tcp.h>
52 #include <linux/if_vlan.h>
53 #include <linux/ip.h>
54 #endif /* FEATURE_TSO */
55
56 #ifdef IPA_OFFLOAD
57 #include <i_qdf_ipa_wdi3.h>
58 #endif /* IPA_OFFLOAD */
59 #include "qdf_ssr_driver_dump.h"
60
61 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
62
63 #define qdf_nbuf_users_inc atomic_inc
64 #define qdf_nbuf_users_dec atomic_dec
65 #define qdf_nbuf_users_set atomic_set
66 #define qdf_nbuf_users_read atomic_read
67 #else
68 #define qdf_nbuf_users_inc refcount_inc
69 #define qdf_nbuf_users_dec refcount_dec
70 #define qdf_nbuf_users_set refcount_set
71 #define qdf_nbuf_users_read refcount_read
72 #endif /* KERNEL_VERSION(4, 13, 0) */
73
74 #define IEEE80211_RADIOTAP_VHT_BW_20 0
75 #define IEEE80211_RADIOTAP_VHT_BW_40 1
76 #define IEEE80211_RADIOTAP_VHT_BW_80 2
77 #define IEEE80211_RADIOTAP_VHT_BW_160 3
78
79 #define RADIOTAP_VHT_BW_20 0
80 #define RADIOTAP_VHT_BW_40 1
81 #define RADIOTAP_VHT_BW_80 4
82 #define RADIOTAP_VHT_BW_160 11
83
84 /* tx status */
85 #define RADIOTAP_TX_STATUS_FAIL 1
86 #define RADIOTAP_TX_STATUS_NOACK 2
87
88 /* channel number to freq conversion */
89 #define CHANNEL_NUM_14 14
90 #define CHANNEL_NUM_15 15
91 #define CHANNEL_NUM_27 27
92 #define CHANNEL_NUM_35 35
93 #define CHANNEL_NUM_182 182
94 #define CHANNEL_NUM_197 197
95 #define CHANNEL_FREQ_2484 2484
96 #define CHANNEL_FREQ_2407 2407
97 #define CHANNEL_FREQ_2512 2512
98 #define CHANNEL_FREQ_5000 5000
99 #define CHANNEL_FREQ_4000 4000
100 #define CHANNEL_FREQ_5150 5150
101 #define FREQ_MULTIPLIER_CONST_5MHZ 5
102 #define FREQ_MULTIPLIER_CONST_20MHZ 20
103 #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
104 #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
105 #define RADIOTAP_CCK_CHANNEL 0x0020
106 #define RADIOTAP_OFDM_CHANNEL 0x0040
107
108 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
109 #include <qdf_mc_timer.h>
110
111 struct qdf_track_timer {
112 qdf_mc_timer_t track_timer;
113 qdf_atomic_t alloc_fail_cnt;
114 };
115
116 static struct qdf_track_timer alloc_track_timer;
117
118 #define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS 5000
119 #define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD 50
120 #endif
121
122 #ifdef NBUF_MEMORY_DEBUG
123 /* SMMU crash indication*/
124 static qdf_atomic_t smmu_crashed;
125 /* Number of nbuf not added to history*/
126 unsigned long g_histroy_add_drop;
127 #endif
128
129 /* Packet Counter */
130 static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
131 static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
132 #ifdef QDF_NBUF_GLOBAL_COUNT
133 #define NBUF_DEBUGFS_NAME "nbuf_counters"
134 static qdf_atomic_t nbuf_count;
135 #endif
136
137 #if defined(NBUF_MEMORY_DEBUG) || defined(QDF_NBUF_GLOBAL_COUNT)
138 static bool is_initial_mem_debug_disabled;
139 #endif
140
141 /**
142 * __qdf_nbuf_get_ip_offset() - Get IPV4/V6 header offset
143 * @data: Pointer to network data buffer
144 *
145  * Get the IP header offset, accounting for any 802.1Q or 802.1AD
146  * VLAN tag present in the L2 header.
147 *
148 * Return: IP header offset
149 */
150 static inline uint8_t __qdf_nbuf_get_ip_offset(uint8_t *data)
151 {
152 uint16_t ether_type;
153
154 ether_type = *(uint16_t *)(data +
155 QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
156
157 if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
158 return QDF_NBUF_TRAC_VLAN_IP_OFFSET;
159 else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
160 return QDF_NBUF_TRAC_DOUBLE_VLAN_IP_OFFSET;
161
162 return QDF_NBUF_TRAC_IP_OFFSET;
163 }
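/*
 * Illustrative usage sketch (not part of the driver) showing how the
 * helper above is typically consumed, e.g. by the DHCP classifier later
 * in this file: locate the IPv4 header regardless of VLAN tagging.
 *
 *	uint8_t *data = qdf_nbuf_data(nbuf);
 *	struct iphdr *iph;
 *
 *	iph = (struct iphdr *)(data + __qdf_nbuf_get_ip_offset(data));
 *
 * An untagged frame yields QDF_NBUF_TRAC_IP_OFFSET, a single 802.1Q tag
 * yields QDF_NBUF_TRAC_VLAN_IP_OFFSET, and a double 802.1AD tag yields
 * QDF_NBUF_TRAC_DOUBLE_VLAN_IP_OFFSET.
 */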
164
165 /**
166 * __qdf_nbuf_get_ether_type() - Get the ether type
167 * @data: Pointer to network data buffer
168 *
169  * Get the ether type, skipping over any 802.1Q or 802.1AD tag in the
170  * L2 header. The value is returned in network byte order, e.g. IPv4
171  * (ether type 0x0800) yields 0x0008 on a little-endian host.
172  *
173  * Return: ether type
174 */
175 static inline uint16_t __qdf_nbuf_get_ether_type(uint8_t *data)
176 {
177 uint16_t ether_type;
178
179 ether_type = *(uint16_t *)(data +
180 QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
181
182 if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
183 ether_type = *(uint16_t *)(data +
184 QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
185 else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
186 ether_type = *(uint16_t *)(data +
187 QDF_NBUF_TRAC_DOUBLE_VLAN_ETH_TYPE_OFFSET);
188
189 return ether_type;
190 }
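/*
 * Illustrative comparison sketch (handle_ipv4() is hypothetical; the
 * pattern itself mirrors __qdf_nbuf_data_is_ipv4_dhcp_pkt() below,
 * which compares against a byte-swapped constant):
 *
 *	if (__qdf_nbuf_get_ether_type(data) ==
 *	    QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
 *		handle_ipv4(data);
 */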
191
192 void qdf_nbuf_tx_desc_count_display(void)
193 {
194 qdf_debug("Current Snapshot of the Driver:");
195 qdf_debug("Data Packets:");
196 qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
197 nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
198 (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
199 nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
200 nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
201 nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
202 nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
203 nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
204 nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
205 nbuf_tx_data[QDF_NBUF_TX_PKT_HTT] -
206 nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
207 qdf_debug(" HTC %d HIF %d CE %d TX_COMP %d",
208 nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
209 nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
210 nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
211 nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
212 nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
213 nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
214 nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
215 qdf_debug("Mgmt Packets:");
216 qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
217 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
218 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
219 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
220 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
221 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
222 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
223 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
224 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
225 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
226 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
227 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
228 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
229 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
230 }
231 qdf_export_symbol(qdf_nbuf_tx_desc_count_display);
232
233 /**
234 * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
235  * @packet_type: packet type, either mgmt or data
236  * @current_state: layer at which the packet is currently present
237 *
238 * Return: none
239 */
240 static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
241 uint8_t current_state)
242 {
243 switch (packet_type) {
244 case QDF_NBUF_TX_PKT_MGMT_TRACK:
245 nbuf_tx_mgmt[current_state]++;
246 break;
247 case QDF_NBUF_TX_PKT_DATA_TRACK:
248 nbuf_tx_data[current_state]++;
249 break;
250 default:
251 break;
252 }
253 }
254
255 void qdf_nbuf_tx_desc_count_clear(void)
256 {
257 memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
258 memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
259 }
260 qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);
261
262 void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
263 {
264 /*
265 * Only Mgmt, Data Packets are tracked. WMI messages
266 * such as scan commands are not tracked
267 */
268 uint8_t packet_type;
269
270 packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
271
272 if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
273 (packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
274 return;
275 }
276 QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
277 qdf_nbuf_tx_desc_count_update(packet_type,
278 current_state);
279 }
280 qdf_export_symbol(qdf_nbuf_set_state);
281
282 #ifdef FEATURE_NBUFF_REPLENISH_TIMER
283 /**
284 * __qdf_nbuf_start_replenish_timer() - Start alloc fail replenish timer
285 *
286 * This function starts the alloc fail replenish timer.
287 *
288 * Return: void
289 */
290 static inline void __qdf_nbuf_start_replenish_timer(void)
291 {
292 qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
293 if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
294 QDF_TIMER_STATE_RUNNING)
295 qdf_mc_timer_start(&alloc_track_timer.track_timer,
296 QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
297 }
298
299 /**
300 * __qdf_nbuf_stop_replenish_timer() - Stop alloc fail replenish timer
301 *
302 * This function stops the alloc fail replenish timer.
303 *
304 * Return: void
305 */
306 static inline void __qdf_nbuf_stop_replenish_timer(void)
307 {
308 if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
309 return;
310
311 qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
312 if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
313 QDF_TIMER_STATE_RUNNING)
314 qdf_mc_timer_stop(&alloc_track_timer.track_timer);
315 }
316
317 /**
318 * qdf_replenish_expire_handler() - Replenish expire handler
319 * @arg: unused callback argument
320 *
321 * This function triggers when the alloc fail replenish timer expires.
322 *
323 * Return: void
324 */
325 static void qdf_replenish_expire_handler(void *arg)
326 {
327 if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
328 QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
329 qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
330 qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
331
332 /* Error handling here */
333 }
334 }
335
336 void __qdf_nbuf_init_replenish_timer(void)
337 {
338 qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
339 qdf_replenish_expire_handler, NULL);
340 }
341
342 void __qdf_nbuf_deinit_replenish_timer(void)
343 {
344 __qdf_nbuf_stop_replenish_timer();
345 qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
346 }
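/*
 * Lifecycle sketch for the replenish timer (a summary of the helpers
 * above, as they are used by the allocation paths later in this file):
 * __qdf_nbuf_init_replenish_timer() is typically called once at attach,
 * each failed skb allocation calls __qdf_nbuf_start_replenish_timer(),
 * a successful pre-allocation calls __qdf_nbuf_stop_replenish_timer(),
 * and __qdf_nbuf_deinit_replenish_timer() tears the timer down. If more
 * than QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD failures accumulate before
 * the QDF_NBUF_ALLOC_EXPIRE_TIMER_MS timer fires,
 * qdf_replenish_expire_handler() reports the failure count.
 */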
347
348 void qdf_nbuf_stop_replenish_timer(void)
349 {
350 __qdf_nbuf_stop_replenish_timer();
351 }
352 #else
353
354 static inline void __qdf_nbuf_start_replenish_timer(void) {}
355 static inline void __qdf_nbuf_stop_replenish_timer(void) {}
356 void qdf_nbuf_stop_replenish_timer(void)
357 {
358 }
359 #endif
360
361 /* globals do not need to be initialized to NULL/0 */
362 qdf_nbuf_trace_update_t qdf_trace_update_cb;
363 qdf_nbuf_free_t nbuf_free_cb;
364
365 #ifdef QDF_NBUF_GLOBAL_COUNT
366
367 int __qdf_nbuf_count_get(void)
368 {
369 return qdf_atomic_read(&nbuf_count);
370 }
371 qdf_export_symbol(__qdf_nbuf_count_get);
372
373 void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
374 {
375 int num_nbuf = 1;
376 qdf_nbuf_t ext_list;
377
378 if (qdf_likely(is_initial_mem_debug_disabled))
379 return;
380
381 ext_list = qdf_nbuf_get_ext_list(nbuf);
382
383 /* Take care to account for frag_list */
384 while (ext_list) {
385 ++num_nbuf;
386 ext_list = qdf_nbuf_queue_next(ext_list);
387 }
388
389 qdf_atomic_add(num_nbuf, &nbuf_count);
390 }
391 qdf_export_symbol(__qdf_nbuf_count_inc);
392
393 void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
394 {
395 qdf_nbuf_t ext_list;
396 int num_nbuf;
397
398 if (qdf_likely(is_initial_mem_debug_disabled))
399 return;
400
401 if (qdf_nbuf_get_users(nbuf) > 1)
402 return;
403
404 num_nbuf = 1;
405
406 /* Take care to account for frag_list */
407 ext_list = qdf_nbuf_get_ext_list(nbuf);
408 while (ext_list) {
409 if (qdf_nbuf_get_users(ext_list) == 1)
410 ++num_nbuf;
411 ext_list = qdf_nbuf_queue_next(ext_list);
412 }
413
414 qdf_atomic_sub(num_nbuf, &nbuf_count);
415 }
416 qdf_export_symbol(__qdf_nbuf_count_dec);
417 #endif
418
419 #ifdef NBUF_FRAG_MEMORY_DEBUG
420 void qdf_nbuf_frag_count_inc(qdf_nbuf_t nbuf)
421 {
422 qdf_nbuf_t ext_list;
423 uint32_t num_nr_frags;
424 uint32_t total_num_nr_frags;
425
426 if (qdf_likely(is_initial_mem_debug_disabled))
427 return;
428
429 num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
430 qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
431
432 total_num_nr_frags = num_nr_frags;
433
434 /* Take into account the frags attached to frag_list */
435 ext_list = qdf_nbuf_get_ext_list(nbuf);
436 while (ext_list) {
437 num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
438 qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
439 total_num_nr_frags += num_nr_frags;
440 ext_list = qdf_nbuf_queue_next(ext_list);
441 }
442
443 qdf_frag_count_inc(total_num_nr_frags);
444 }
445
446 qdf_export_symbol(qdf_nbuf_frag_count_inc);
447
448 void qdf_nbuf_frag_count_dec(qdf_nbuf_t nbuf)
449 {
450 qdf_nbuf_t ext_list;
451 uint32_t num_nr_frags;
452 uint32_t total_num_nr_frags;
453
454 if (qdf_likely(is_initial_mem_debug_disabled))
455 return;
456
457 if (qdf_nbuf_get_users(nbuf) > 1)
458 return;
459
460 num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
461 qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
462
463 total_num_nr_frags = num_nr_frags;
464
465 /* Take into account the frags attached to frag_list */
466 ext_list = qdf_nbuf_get_ext_list(nbuf);
467 while (ext_list) {
468 if (qdf_nbuf_get_users(ext_list) == 1) {
469 num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
470 qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
471 total_num_nr_frags += num_nr_frags;
472 }
473 ext_list = qdf_nbuf_queue_next(ext_list);
474 }
475
476 qdf_frag_count_dec(total_num_nr_frags);
477 }
478
479 qdf_export_symbol(qdf_nbuf_frag_count_dec);
480
481 #endif
482
483 static inline void
484 qdf_nbuf_set_defaults(struct sk_buff *skb, int align, int reserve)
485 {
486 unsigned long offset;
487
488 memset(skb->cb, 0x0, sizeof(skb->cb));
489 skb->dev = NULL;
490
491 /*
492 * The default is for netbuf fragments to be interpreted
493 * as wordstreams rather than bytestreams.
494 */
495 QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
496 QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
497
498 /*
499 * XXX:how about we reserve first then align
500 * Align & make sure that the tail & data are adjusted properly
501 */
502
503 if (align) {
504 offset = ((unsigned long)skb->data) % align;
505 if (offset)
506 skb_reserve(skb, align - offset);
507 }
508
509 /*
510 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
511 * pointer
512 */
513 skb_reserve(skb, reserve);
514 qdf_nbuf_count_inc(skb);
515 }
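/*
 * Alignment sketch for qdf_nbuf_set_defaults() above (illustrative
 * numbers only): with align = 4 and skb->data ending in ...0x3, the
 * computed offset is 3, so skb_reserve(skb, 4 - 3) advances data by one
 * byte to the next 4-byte boundary before the caller's 'reserve' bytes
 * are reserved on top.
 */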
516
517 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
518 !defined(QCA_WIFI_QCN9000)
519 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
520 int align, int prio, const char *func,
521 uint32_t line)
522 {
523 struct sk_buff *skb;
524 uint32_t lowmem_alloc_tries = 0;
525
526 if (align)
527 size += (align - 1);
528
529 realloc:
530 skb = dev_alloc_skb(size);
531
532 if (skb)
533 goto skb_alloc;
534
535 skb = pld_nbuf_pre_alloc(size);
536
537 if (!skb) {
538 qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
539 size, func, line);
540 return NULL;
541 }
542
543 skb_alloc:
544 	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040.
545 * Though we are trying to reserve low memory upfront to prevent this,
546 * we sometimes see SKBs allocated from low memory.
547 */
548 if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
549 lowmem_alloc_tries++;
550 if (lowmem_alloc_tries > 100) {
551 qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
552 size, func, line);
553 return NULL;
554 } else {
555 /* Not freeing to make sure it
556 * will not get allocated again
557 */
558 goto realloc;
559 }
560 }
561
562 qdf_nbuf_set_defaults(skb, align, reserve);
563
564 return skb;
565 }
566 #else
567
568 #ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
569 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
570 int align, int prio, const char *func,
571 uint32_t line)
572 {
573 return __qdf_nbuf_frag_alloc(osdev, size, reserve, align, prio, func,
574 line);
575 }
576
577 #else
578 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
579 int align, int prio, const char *func,
580 uint32_t line)
581 {
582 struct sk_buff *skb;
583 int flags = GFP_KERNEL;
584
585 if (align)
586 size += (align - 1);
587
588 if (in_interrupt() || irqs_disabled() || in_atomic())
589 flags = GFP_ATOMIC;
590
591 skb = alloc_skb(size, flags);
592
593 if (skb)
594 goto skb_alloc;
595
596 skb = pld_nbuf_pre_alloc(size);
597
598 if (!skb) {
599 qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
600 size, func, line);
601 __qdf_nbuf_start_replenish_timer();
602 return NULL;
603 }
604
605 __qdf_nbuf_stop_replenish_timer();
606
607 skb_alloc:
608 qdf_nbuf_set_defaults(skb, align, reserve);
609
610 return skb;
611 }
612 #endif
613
614 #endif
615 qdf_export_symbol(__qdf_nbuf_alloc);
616
617 struct sk_buff *__qdf_nbuf_frag_alloc(qdf_device_t osdev, size_t size,
618 int reserve, int align, int prio,
619 const char *func, uint32_t line)
620 {
621 struct sk_buff *skb;
622 int flags = GFP_KERNEL & ~__GFP_DIRECT_RECLAIM;
623 bool atomic = false;
624
625 if (align)
626 size += (align - 1);
627
628 if (in_interrupt() || irqs_disabled() || in_atomic()) {
629 atomic = true;
630 flags = GFP_ATOMIC;
631 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
632 /*
633 		 * kcompactd has been observed burning CPU to assemble an
634 		 * order-3 page. __netdev_alloc_skb() has a 4k page fallback in
635 		 * case the high-order page allocation fails, so we don't need
636 		 * to push hard here. Let kcompactd rest in peace.
637 */
638 flags = flags & ~__GFP_KSWAPD_RECLAIM;
639 #endif
640 }
641
642 skb = __netdev_alloc_skb(NULL, size, flags);
643 if (skb)
644 goto skb_alloc;
645
646 /* 32k page frag alloc failed, try page slab allocation */
647 if (likely(!atomic))
648 flags |= __GFP_DIRECT_RECLAIM;
649
650 skb = alloc_skb(size, flags);
651 if (skb)
652 goto skb_alloc;
653
654 skb = pld_nbuf_pre_alloc(size);
655
656 if (!skb) {
657 qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
658 size, func, line);
659 __qdf_nbuf_start_replenish_timer();
660 return NULL;
661 }
662
663 __qdf_nbuf_stop_replenish_timer();
664
665 skb_alloc:
666 qdf_nbuf_set_defaults(skb, align, reserve);
667
668 return skb;
669 }
670
671 qdf_export_symbol(__qdf_nbuf_frag_alloc);
672
673 __qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
674 const char *func, uint32_t line)
675 {
676 qdf_nbuf_t nbuf;
677 unsigned long offset;
678
679 if (align)
680 size += (align - 1);
681
682 nbuf = alloc_skb(size, GFP_ATOMIC);
683 if (!nbuf)
684 goto ret_nbuf;
685
686 memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
687
688 skb_reserve(nbuf, reserve);
689
690 if (align) {
691 offset = ((unsigned long)nbuf->data) % align;
692 if (offset)
693 skb_reserve(nbuf, align - offset);
694 }
695
696 qdf_nbuf_count_inc(nbuf);
697
698 ret_nbuf:
699 return nbuf;
700 }
701
702 qdf_export_symbol(__qdf_nbuf_alloc_no_recycler);
703
704 void __qdf_nbuf_free(struct sk_buff *skb)
705 {
706 if (pld_nbuf_pre_alloc_free(skb))
707 return;
708
709 qdf_nbuf_frag_count_dec(skb);
710
711 qdf_nbuf_count_dec(skb);
712 if (nbuf_free_cb)
713 nbuf_free_cb(skb);
714 else
715 dev_kfree_skb_any(skb);
716 }
717
718 qdf_export_symbol(__qdf_nbuf_free);
719
720 __qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t skb)
721 {
722 qdf_nbuf_t skb_new = NULL;
723
724 skb_new = skb_clone(skb, GFP_ATOMIC);
725 if (skb_new) {
726 qdf_nbuf_frag_count_inc(skb_new);
727 qdf_nbuf_count_inc(skb_new);
728 }
729 return skb_new;
730 }
731
732 qdf_export_symbol(__qdf_nbuf_clone);
733
734 struct sk_buff *
735 __qdf_nbuf_page_frag_alloc(qdf_device_t osdev, size_t size, int reserve,
736 int align, __qdf_frag_cache_t *pf_cache,
737 const char *func, uint32_t line)
738 {
739 struct sk_buff *skb;
740 qdf_frag_t frag_data;
741 size_t orig_size = size;
742 int flags = GFP_KERNEL;
743
744 if (align)
745 size += (align - 1);
746
747 size += NET_SKB_PAD;
748 size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
749 size = SKB_DATA_ALIGN(size);
750
751 if (in_interrupt() || irqs_disabled() || in_atomic())
752 flags = GFP_ATOMIC;
753
754 frag_data = page_frag_alloc(pf_cache, size, flags);
755 if (!frag_data) {
756 qdf_rl_nofl_err("page frag alloc failed %zuB @ %s:%d",
757 size, func, line);
758 return __qdf_nbuf_alloc(osdev, orig_size, reserve, align, 0,
759 func, line);
760 }
761
762 skb = build_skb(frag_data, size);
763 if (skb) {
764 skb_reserve(skb, NET_SKB_PAD);
765 goto skb_alloc;
766 }
767
768 /* Free the data allocated from pf_cache */
769 page_frag_free(frag_data);
770
771 size = orig_size + align - 1;
772
773 skb = pld_nbuf_pre_alloc(size);
774 if (!skb) {
775 qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
776 size, func, line);
777 __qdf_nbuf_start_replenish_timer();
778 return NULL;
779 }
780
781 __qdf_nbuf_stop_replenish_timer();
782
783 skb_alloc:
784 qdf_nbuf_set_defaults(skb, align, reserve);
785
786 return skb;
787 }
788
789 qdf_export_symbol(__qdf_nbuf_page_frag_alloc);
790
791 #ifdef QCA_DP_TX_NBUF_LIST_FREE
792 void
793 __qdf_nbuf_dev_kfree_list(__qdf_nbuf_queue_head_t *nbuf_queue_head)
794 {
795 dev_kfree_skb_list_fast(nbuf_queue_head);
796 }
797 #else
798 void
799 __qdf_nbuf_dev_kfree_list(__qdf_nbuf_queue_head_t *nbuf_queue_head)
800 {
801 }
802 #endif
803
804 qdf_export_symbol(__qdf_nbuf_dev_kfree_list);
805
806 #ifdef NBUF_MEMORY_DEBUG
807 struct qdf_nbuf_event {
808 qdf_nbuf_t nbuf;
809 char func[QDF_MEM_FUNC_NAME_SIZE];
810 uint32_t line;
811 enum qdf_nbuf_event_type type;
812 uint64_t timestamp;
813 qdf_dma_addr_t iova;
814 };
815
816 #ifndef QDF_NBUF_HISTORY_SIZE
817 #define QDF_NBUF_HISTORY_SIZE 4096
818 #endif
819 static qdf_atomic_t qdf_nbuf_history_index;
820 static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
821
822 void qdf_nbuf_ssr_register_region(void)
823 {
824 qdf_ssr_driver_dump_register_region("qdf_nbuf_history",
825 qdf_nbuf_history,
826 sizeof(qdf_nbuf_history));
827 }
828
829 qdf_export_symbol(qdf_nbuf_ssr_register_region);
830
831 void qdf_nbuf_ssr_unregister_region(void)
832 {
833 qdf_ssr_driver_dump_unregister_region("qdf_nbuf_history");
834 }
835
836 qdf_export_symbol(qdf_nbuf_ssr_unregister_region);
837
838 static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
839 {
840 int32_t next = qdf_atomic_inc_return(index);
841
842 if (next == size)
843 qdf_atomic_sub(size, index);
844
845 return next % size;
846 }
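/*
 * Index arithmetic sketch for qdf_nbuf_circular_index_next(): with
 * size = QDF_NBUF_HISTORY_SIZE (4096) the atomic counter increments
 * 1, 2, ..., 4095, 4096, ...; when it reaches exactly 4096 the counter
 * is pulled back by 4096 so it never overflows, and the returned slot
 * (next % size) wraps to 0, reusing the oldest history entry.
 */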
847
848 void
849 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
850 enum qdf_nbuf_event_type type)
851 {
852 int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
853 QDF_NBUF_HISTORY_SIZE);
854 struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];
855
856 if (qdf_atomic_read(&smmu_crashed)) {
857 g_histroy_add_drop++;
858 return;
859 }
860
861 event->nbuf = nbuf;
862 qdf_str_lcopy(event->func, func, QDF_MEM_FUNC_NAME_SIZE);
863 event->line = line;
864 event->type = type;
865 event->timestamp = qdf_get_log_timestamp();
866 if (type == QDF_NBUF_MAP || type == QDF_NBUF_UNMAP ||
867 type == QDF_NBUF_SMMU_MAP || type == QDF_NBUF_SMMU_UNMAP)
868 event->iova = QDF_NBUF_CB_PADDR(nbuf);
869 else
870 event->iova = 0;
871 }
872
873 void qdf_set_smmu_fault_state(bool smmu_fault_state)
874 {
875 qdf_atomic_set(&smmu_crashed, smmu_fault_state);
876 if (!smmu_fault_state)
877 g_histroy_add_drop = 0;
878 }
879 qdf_export_symbol(qdf_set_smmu_fault_state);
880 #endif /* NBUF_MEMORY_DEBUG */
881
882 #ifdef NBUF_SMMU_MAP_UNMAP_DEBUG
883 #define qdf_nbuf_smmu_map_tracker_bits 11 /* 2048 buckets */
884 qdf_tracker_declare(qdf_nbuf_smmu_map_tracker, qdf_nbuf_smmu_map_tracker_bits,
885 "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");
886
887 static void qdf_nbuf_smmu_map_tracking_init(void)
888 {
889 qdf_tracker_init(&qdf_nbuf_smmu_map_tracker);
890 }
891
892 static void qdf_nbuf_smmu_map_tracking_deinit(void)
893 {
894 qdf_tracker_deinit(&qdf_nbuf_smmu_map_tracker);
895 }
896
897 static QDF_STATUS
898 qdf_nbuf_track_smmu_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
899 {
900 if (is_initial_mem_debug_disabled)
901 return QDF_STATUS_SUCCESS;
902
903 return qdf_tracker_track(&qdf_nbuf_smmu_map_tracker, nbuf, func, line);
904 }
905
906 static void
907 qdf_nbuf_untrack_smmu_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
908 {
909 if (is_initial_mem_debug_disabled)
910 return;
911
912 qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_SMMU_UNMAP);
913 qdf_tracker_untrack(&qdf_nbuf_smmu_map_tracker, nbuf, func, line);
914 }
915
916 void qdf_nbuf_map_check_for_smmu_leaks(void)
917 {
918 qdf_tracker_check_for_leaks(&qdf_nbuf_smmu_map_tracker);
919 }
920
921 #ifdef IPA_OFFLOAD
922 QDF_STATUS qdf_nbuf_smmu_map_debug(qdf_nbuf_t nbuf,
923 uint8_t hdl,
924 uint8_t num_buffers,
925 qdf_mem_info_t *info,
926 const char *func,
927 uint32_t line)
928 {
929 QDF_STATUS status;
930
931 status = qdf_nbuf_track_smmu_map(nbuf, func, line);
932 if (QDF_IS_STATUS_ERROR(status))
933 return status;
934
935 status = __qdf_ipa_wdi_create_smmu_mapping(hdl, num_buffers, info);
936
937 if (QDF_IS_STATUS_ERROR(status)) {
938 qdf_nbuf_untrack_smmu_map(nbuf, func, line);
939 } else {
940 if (!is_initial_mem_debug_disabled)
941 qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_MAP);
942 qdf_net_buf_debug_update_smmu_map_node(nbuf, info->iova,
943 info->pa, func, line);
944 }
945
946 return status;
947 }
948
949 qdf_export_symbol(qdf_nbuf_smmu_map_debug);
950
951 QDF_STATUS qdf_nbuf_smmu_unmap_debug(qdf_nbuf_t nbuf,
952 uint8_t hdl,
953 uint8_t num_buffers,
954 qdf_mem_info_t *info,
955 const char *func,
956 uint32_t line)
957 {
958 QDF_STATUS status;
959
960 qdf_nbuf_untrack_smmu_map(nbuf, func, line);
961 status = __qdf_ipa_wdi_release_smmu_mapping(hdl, num_buffers, info);
962 qdf_net_buf_debug_update_smmu_unmap_node(nbuf, info->iova,
963 info->pa, func, line);
964 return status;
965 }
966
967 qdf_export_symbol(qdf_nbuf_smmu_unmap_debug);
968 #endif /* IPA_OFFLOAD */
969
970 static void qdf_nbuf_panic_on_free_if_smmu_mapped(qdf_nbuf_t nbuf,
971 const char *func,
972 uint32_t line)
973 {
974 char map_func[QDF_TRACKER_FUNC_SIZE];
975 uint32_t map_line;
976
977 if (!qdf_tracker_lookup(&qdf_nbuf_smmu_map_tracker, nbuf,
978 &map_func, &map_line))
979 return;
980
981 QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
982 func, line, map_func, map_line);
983 }
984
985 static inline void qdf_net_buf_update_smmu_params(QDF_NBUF_TRACK *p_node)
986 {
987 p_node->smmu_unmap_line_num = 0;
988 p_node->is_nbuf_smmu_mapped = false;
989 p_node->smmu_map_line_num = 0;
990 p_node->smmu_map_func_name[0] = '\0';
991 p_node->smmu_unmap_func_name[0] = '\0';
992 p_node->smmu_unmap_iova_addr = 0;
993 p_node->smmu_unmap_pa_addr = 0;
994 p_node->smmu_map_iova_addr = 0;
995 p_node->smmu_map_pa_addr = 0;
996 }
997 #else /* !NBUF_SMMU_MAP_UNMAP_DEBUG */
998 #ifdef NBUF_MEMORY_DEBUG
999 static void qdf_nbuf_smmu_map_tracking_init(void)
1000 {
1001 }
1002
1003 static void qdf_nbuf_smmu_map_tracking_deinit(void)
1004 {
1005 }
1006
1007 static void qdf_nbuf_panic_on_free_if_smmu_mapped(qdf_nbuf_t nbuf,
1008 const char *func,
1009 uint32_t line)
1010 {
1011 }
1012
1013 static inline void qdf_net_buf_update_smmu_params(QDF_NBUF_TRACK *p_node)
1014 {
1015 }
1016 #endif /* NBUF_MEMORY_DEBUG */
1017
1018 #ifdef IPA_OFFLOAD
1019 QDF_STATUS qdf_nbuf_smmu_map_debug(qdf_nbuf_t nbuf,
1020 uint8_t hdl,
1021 uint8_t num_buffers,
1022 qdf_mem_info_t *info,
1023 const char *func,
1024 uint32_t line)
1025 {
1026 return __qdf_ipa_wdi_create_smmu_mapping(hdl, num_buffers, info);
1027 }
1028
1029 qdf_export_symbol(qdf_nbuf_smmu_map_debug);
1030
1031 QDF_STATUS qdf_nbuf_smmu_unmap_debug(qdf_nbuf_t nbuf,
1032 uint8_t hdl,
1033 uint8_t num_buffers,
1034 qdf_mem_info_t *info,
1035 const char *func,
1036 uint32_t line)
1037 {
1038 return __qdf_ipa_wdi_release_smmu_mapping(hdl, num_buffers, info);
1039 }
1040
1041 qdf_export_symbol(qdf_nbuf_smmu_unmap_debug);
1042 #endif /* IPA_OFFLOAD */
1043 #endif /* NBUF_SMMU_MAP_UNMAP_DEBUG */
1044
1045 #ifdef NBUF_MAP_UNMAP_DEBUG
1046 #define qdf_nbuf_map_tracker_bits 11 /* 2048 buckets */
1047 qdf_tracker_declare(qdf_nbuf_map_tracker, qdf_nbuf_map_tracker_bits,
1048 "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");
1049
1050 static void qdf_nbuf_map_tracking_init(void)
1051 {
1052 qdf_tracker_init(&qdf_nbuf_map_tracker);
1053 }
1054
1055 static void qdf_nbuf_map_tracking_deinit(void)
1056 {
1057 qdf_tracker_deinit(&qdf_nbuf_map_tracker);
1058 }
1059
1060 static QDF_STATUS
1061 qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
1062 {
1063 if (is_initial_mem_debug_disabled)
1064 return QDF_STATUS_SUCCESS;
1065
1066 return qdf_tracker_track(&qdf_nbuf_map_tracker, nbuf, func, line);
1067 }
1068
1069 static void
1070 qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
1071 {
1072 if (is_initial_mem_debug_disabled)
1073 return;
1074
1075 qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_UNMAP);
1076 qdf_tracker_untrack(&qdf_nbuf_map_tracker, nbuf, func, line);
1077 }
1078
1079 void qdf_nbuf_map_check_for_leaks(void)
1080 {
1081 qdf_tracker_check_for_leaks(&qdf_nbuf_map_tracker);
1082 }
1083
1084 QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
1085 qdf_nbuf_t buf,
1086 qdf_dma_dir_t dir,
1087 const char *func,
1088 uint32_t line)
1089 {
1090 QDF_STATUS status;
1091
1092 status = qdf_nbuf_track_map(buf, func, line);
1093 if (QDF_IS_STATUS_ERROR(status))
1094 return status;
1095
1096 status = __qdf_nbuf_map(osdev, buf, dir);
1097 if (QDF_IS_STATUS_ERROR(status)) {
1098 qdf_nbuf_untrack_map(buf, func, line);
1099 } else {
1100 if (!is_initial_mem_debug_disabled)
1101 qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
1102 qdf_net_buf_debug_update_map_node(buf, func, line);
1103 }
1104
1105 return status;
1106 }
1107
1108 qdf_export_symbol(qdf_nbuf_map_debug);
1109
1110 void qdf_nbuf_unmap_debug(qdf_device_t osdev,
1111 qdf_nbuf_t buf,
1112 qdf_dma_dir_t dir,
1113 const char *func,
1114 uint32_t line)
1115 {
1116 qdf_nbuf_untrack_map(buf, func, line);
1117 __qdf_nbuf_unmap_single(osdev, buf, dir);
1118 qdf_net_buf_debug_update_unmap_node(buf, func, line);
1119 }
1120
1121 qdf_export_symbol(qdf_nbuf_unmap_debug);
1122
1123 QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
1124 qdf_nbuf_t buf,
1125 qdf_dma_dir_t dir,
1126 const char *func,
1127 uint32_t line)
1128 {
1129 QDF_STATUS status;
1130
1131 status = qdf_nbuf_track_map(buf, func, line);
1132 if (QDF_IS_STATUS_ERROR(status))
1133 return status;
1134
1135 status = __qdf_nbuf_map_single(osdev, buf, dir);
1136 if (QDF_IS_STATUS_ERROR(status)) {
1137 qdf_nbuf_untrack_map(buf, func, line);
1138 } else {
1139 if (!is_initial_mem_debug_disabled)
1140 qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
1141 qdf_net_buf_debug_update_map_node(buf, func, line);
1142 }
1143
1144 return status;
1145 }
1146
1147 qdf_export_symbol(qdf_nbuf_map_single_debug);
1148
1149 void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
1150 qdf_nbuf_t buf,
1151 qdf_dma_dir_t dir,
1152 const char *func,
1153 uint32_t line)
1154 {
1155 qdf_nbuf_untrack_map(buf, func, line);
1156 __qdf_nbuf_unmap_single(osdev, buf, dir);
1157 qdf_net_buf_debug_update_unmap_node(buf, func, line);
1158 }
1159
1160 qdf_export_symbol(qdf_nbuf_unmap_single_debug);
1161
1162 QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
1163 qdf_nbuf_t buf,
1164 qdf_dma_dir_t dir,
1165 int nbytes,
1166 const char *func,
1167 uint32_t line)
1168 {
1169 QDF_STATUS status;
1170
1171 status = qdf_nbuf_track_map(buf, func, line);
1172 if (QDF_IS_STATUS_ERROR(status))
1173 return status;
1174
1175 status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
1176 if (QDF_IS_STATUS_ERROR(status)) {
1177 qdf_nbuf_untrack_map(buf, func, line);
1178 } else {
1179 if (!is_initial_mem_debug_disabled)
1180 qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
1181 qdf_net_buf_debug_update_map_node(buf, func, line);
1182 }
1183
1184 return status;
1185 }
1186
1187 qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
1188
1189 void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
1190 qdf_nbuf_t buf,
1191 qdf_dma_dir_t dir,
1192 int nbytes,
1193 const char *func,
1194 uint32_t line)
1195 {
1196 qdf_nbuf_untrack_map(buf, func, line);
1197 __qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
1198 qdf_net_buf_debug_update_unmap_node(buf, func, line);
1199 }
1200
1201 qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
1202
1203 QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
1204 qdf_nbuf_t buf,
1205 qdf_dma_dir_t dir,
1206 int nbytes,
1207 const char *func,
1208 uint32_t line)
1209 {
1210 QDF_STATUS status;
1211
1212 status = qdf_nbuf_track_map(buf, func, line);
1213 if (QDF_IS_STATUS_ERROR(status))
1214 return status;
1215
1216 status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
1217 if (QDF_IS_STATUS_ERROR(status)) {
1218 qdf_nbuf_untrack_map(buf, func, line);
1219 } else {
1220 if (!is_initial_mem_debug_disabled)
1221 qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
1222 qdf_net_buf_debug_update_map_node(buf, func, line);
1223 }
1224
1225 return status;
1226 }
1227
1228 qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
1229
1230 void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
1231 qdf_nbuf_t buf,
1232 qdf_dma_dir_t dir,
1233 int nbytes,
1234 const char *func,
1235 uint32_t line)
1236 {
1237 qdf_nbuf_untrack_map(buf, func, line);
1238 __qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
1239 qdf_net_buf_debug_update_unmap_node(buf, func, line);
1240 }
1241
1242 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
1243
1244 void qdf_nbuf_unmap_nbytes_single_paddr_debug(qdf_device_t osdev,
1245 qdf_nbuf_t buf,
1246 qdf_dma_addr_t phy_addr,
1247 qdf_dma_dir_t dir, int nbytes,
1248 const char *func, uint32_t line)
1249 {
1250 qdf_nbuf_untrack_map(buf, func, line);
1251 __qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf), dir, false);
1252 __qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
1253 qdf_net_buf_debug_update_unmap_node(buf, func, line);
1254 }
1255
1256 qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_paddr_debug);
1257
1258 static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
1259 const char *func,
1260 uint32_t line)
1261 {
1262 char map_func[QDF_TRACKER_FUNC_SIZE];
1263 uint32_t map_line;
1264
1265 if (!qdf_tracker_lookup(&qdf_nbuf_map_tracker, nbuf,
1266 &map_func, &map_line))
1267 return;
1268
1269 QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
1270 func, line, map_func, map_line);
1271 }
1272 #else
1273 static inline void qdf_nbuf_map_tracking_init(void)
1274 {
1275 }
1276
1277 static inline void qdf_nbuf_map_tracking_deinit(void)
1278 {
1279 }
1280
1281 static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
1282 const char *func,
1283 uint32_t line)
1284 {
1285 }
1286 #endif /* NBUF_MAP_UNMAP_DEBUG */
1287
1288 #ifdef QDF_OS_DEBUG
1289 QDF_STATUS
1290 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
1291 {
1292 struct skb_shared_info *sh = skb_shinfo(skb);
1293
1294 qdf_assert((dir == QDF_DMA_TO_DEVICE)
1295 || (dir == QDF_DMA_FROM_DEVICE));
1296
1297 /*
1298 * Assume there's only a single fragment.
1299 * To support multiple fragments, it would be necessary to change
1300 * qdf_nbuf_t to be a separate object that stores meta-info
1301 * (including the bus address for each fragment) and a pointer
1302 * to the underlying sk_buff.
1303 */
1304 qdf_assert(sh->nr_frags == 0);
1305
1306 return __qdf_nbuf_map_single(osdev, skb, dir);
1307 }
1308 qdf_export_symbol(__qdf_nbuf_map);
1309
1310 #else
1311 QDF_STATUS
1312 __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
1313 {
1314 return __qdf_nbuf_map_single(osdev, skb, dir);
1315 }
1316 qdf_export_symbol(__qdf_nbuf_map);
1317 #endif
1318
1319 void
1320 __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
1321 qdf_dma_dir_t dir)
1322 {
1323 qdf_assert((dir == QDF_DMA_TO_DEVICE)
1324 || (dir == QDF_DMA_FROM_DEVICE));
1325
1326 /*
1327 * Assume there's a single fragment.
1328 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
1329 */
1330 __qdf_nbuf_unmap_single(osdev, skb, dir);
1331 }
1332 qdf_export_symbol(__qdf_nbuf_unmap);
1333
1334 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
1335 QDF_STATUS
1336 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
1337 {
1338 qdf_dma_addr_t paddr;
1339
1340 QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
1341 BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
1342 BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
1343 return QDF_STATUS_SUCCESS;
1344 }
1345 qdf_export_symbol(__qdf_nbuf_map_single);
1346 #else
1347 QDF_STATUS
1348 __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
1349 {
1350 qdf_dma_addr_t paddr;
1351
1352 /* assume that the OS only provides a single fragment */
1353 QDF_NBUF_CB_PADDR(buf) = paddr =
1354 dma_map_single(osdev->dev, buf->data,
1355 skb_end_pointer(buf) - buf->data,
1356 __qdf_dma_dir_to_os(dir));
1357 __qdf_record_nbuf_nbytes(
1358 __qdf_nbuf_get_end_offset(buf), dir, true);
1359 return dma_mapping_error(osdev->dev, paddr)
1360 ? QDF_STATUS_E_FAILURE
1361 : QDF_STATUS_SUCCESS;
1362 }
1363 qdf_export_symbol(__qdf_nbuf_map_single);
1364 #endif
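/*
 * Mapping sketch for __qdf_nbuf_map_single(): on targets with a real
 * DMA path the whole linear buffer, from skb->data up to
 * skb_end_pointer(), is mapped with dma_map_single() and the resulting
 * bus address is cached in QDF_NBUF_CB_PADDR(); on the
 * A_SIMOS_DEVHOST/HIF_USB/HIF_SDIO builds above, the buffer's virtual
 * address is simply recorded as the paddr instead.
 */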
1365
1366 #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
1367 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
1368 qdf_dma_dir_t dir)
1369 {
1370 }
1371 #else
1372 void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
1373 qdf_dma_dir_t dir)
1374 {
1375 if (QDF_NBUF_CB_PADDR(buf)) {
1376 __qdf_record_nbuf_nbytes(
1377 __qdf_nbuf_get_end_offset(buf), dir, false);
1378 dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
1379 skb_end_pointer(buf) - buf->data,
1380 __qdf_dma_dir_to_os(dir));
1381 }
1382 }
1383 #endif
1384 qdf_export_symbol(__qdf_nbuf_unmap_single);
1385
1386 QDF_STATUS
1387 __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
1388 {
1389 switch (cksum->l4_result) {
1390 case QDF_NBUF_RX_CKSUM_NONE:
1391 skb->ip_summed = CHECKSUM_NONE;
1392 break;
1393 case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
1394 skb->ip_summed = CHECKSUM_UNNECESSARY;
1395 skb->csum_level = cksum->csum_level;
1396 break;
1397 case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
1398 skb->ip_summed = CHECKSUM_PARTIAL;
1399 skb->csum = cksum->val;
1400 break;
1401 default:
1402 pr_err("Unknown checksum type\n");
1403 qdf_assert(0);
1404 return QDF_STATUS_E_NOSUPPORT;
1405 }
1406 return QDF_STATUS_SUCCESS;
1407 }
1408 qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
1409
1410 qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
1411 {
1412 switch (skb->ip_summed) {
1413 case CHECKSUM_NONE:
1414 return QDF_NBUF_TX_CKSUM_NONE;
1415 case CHECKSUM_PARTIAL:
1416 return QDF_NBUF_TX_CKSUM_TCP_UDP;
1417 case CHECKSUM_COMPLETE:
1418 return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
1419 default:
1420 return QDF_NBUF_TX_CKSUM_NONE;
1421 }
1422 }
1423 qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
1424
1425 uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
1426 {
1427 return skb->priority;
1428 }
1429 qdf_export_symbol(__qdf_nbuf_get_tid);
1430
1431 void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
1432 {
1433 skb->priority = tid;
1434 }
1435 qdf_export_symbol(__qdf_nbuf_set_tid);
1436
1437 uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
1438 {
1439 return QDF_NBUF_EXEMPT_NO_EXEMPTION;
1440 }
1441 qdf_export_symbol(__qdf_nbuf_get_exemption_type);
1442
1443 void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
1444 {
1445 qdf_trace_update_cb = cb_func_ptr;
1446 }
1447 qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
1448
1449 enum qdf_proto_subtype
1450 __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1451 {
1452 enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1453
1454 if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1455 (data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1456 QDF_DHCP_OPTION53_LENGTH)) {
1457
1458 switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1459 case QDF_DHCP_DISCOVER:
1460 subtype = QDF_PROTO_DHCP_DISCOVER;
1461 break;
1462 case QDF_DHCP_REQUEST:
1463 subtype = QDF_PROTO_DHCP_REQUEST;
1464 break;
1465 case QDF_DHCP_OFFER:
1466 subtype = QDF_PROTO_DHCP_OFFER;
1467 break;
1468 case QDF_DHCP_ACK:
1469 subtype = QDF_PROTO_DHCP_ACK;
1470 break;
1471 case QDF_DHCP_NAK:
1472 subtype = QDF_PROTO_DHCP_NACK;
1473 break;
1474 case QDF_DHCP_RELEASE:
1475 subtype = QDF_PROTO_DHCP_RELEASE;
1476 break;
1477 case QDF_DHCP_INFORM:
1478 subtype = QDF_PROTO_DHCP_INFORM;
1479 break;
1480 case QDF_DHCP_DECLINE:
1481 subtype = QDF_PROTO_DHCP_DECLINE;
1482 break;
1483 default:
1484 break;
1485 }
1486 }
1487
1488 return subtype;
1489 }
1490
1491 #define EAPOL_WPA_KEY_INFO_ACK BIT(7)
1492 #define EAPOL_WPA_KEY_INFO_MIC BIT(8)
1493 #define EAPOL_WPA_KEY_INFO_ENCR_KEY_DATA BIT(12) /* IEEE 802.11i/RSN only */
1494
1495 /**
1496 * __qdf_nbuf_data_get_eapol_key() - Get EAPOL key
1497 * @data: Pointer to EAPOL packet data buffer
1498 *
1499  * We can distinguish M1/M3 from M2/M4 by the ack bit in the keyinfo field.
1500  * The relationship between the ack bit and the EAPOL type is as follows:
1501 *
1502 * EAPOL type | M1 M2 M3 M4
1503 * --------------------------------------
1504 * Ack | 1 0 1 0
1505 * --------------------------------------
1506 *
1507  * M1 vs M3 and M2 vs M4 are then differentiated as follows:
1508  * M2/M4: M4 has a keyDataLength of 0 or an all-zero key Nonce.
1509  * M1/M3: M3 has the mic or encrKeyData bit set in the keyinfo field.
1510 *
1511 * Return: subtype of the EAPOL packet.
1512 */
1513 static inline enum qdf_proto_subtype
1514 __qdf_nbuf_data_get_eapol_key(uint8_t *data)
1515 {
1516 uint16_t key_info, key_data_length;
1517 enum qdf_proto_subtype subtype;
1518 uint64_t *key_nonce;
1519
1520 key_info = qdf_ntohs((uint16_t)(*(uint16_t *)
1521 (data + EAPOL_KEY_INFO_OFFSET)));
1522
1523 key_data_length = qdf_ntohs((uint16_t)(*(uint16_t *)
1524 (data + EAPOL_KEY_DATA_LENGTH_OFFSET)));
1525 key_nonce = (uint64_t *)(data + EAPOL_WPA_KEY_NONCE_OFFSET);
1526
1527 if (key_info & EAPOL_WPA_KEY_INFO_ACK)
1528 if (key_info &
1529 (EAPOL_WPA_KEY_INFO_MIC | EAPOL_WPA_KEY_INFO_ENCR_KEY_DATA))
1530 subtype = QDF_PROTO_EAPOL_M3;
1531 else
1532 subtype = QDF_PROTO_EAPOL_M1;
1533 else
1534 if (key_data_length == 0 ||
1535 !((*key_nonce) || (*(key_nonce + 1)) ||
1536 (*(key_nonce + 2)) || (*(key_nonce + 3))))
1537 subtype = QDF_PROTO_EAPOL_M4;
1538 else
1539 subtype = QDF_PROTO_EAPOL_M2;
1540
1541 return subtype;
1542 }
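/*
 * Classification sketch for __qdf_nbuf_data_get_eapol_key(), following
 * the key_info bit definitions and the logic above:
 *
 *	ACK=1, MIC/ENCR clear                  -> QDF_PROTO_EAPOL_M1
 *	ACK=0, key data len and nonce non-zero -> QDF_PROTO_EAPOL_M2
 *	ACK=1, MIC or ENCR set                 -> QDF_PROTO_EAPOL_M3
 *	ACK=0, key data len 0 or nonce all 0   -> QDF_PROTO_EAPOL_M4
 */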
1543
1544 /**
1545 * __qdf_nbuf_data_get_exp_msg_type() - Get EAP expanded msg type
1546 * @data: Pointer to EAPOL packet data buffer
1547 * @code: EAP code
1548 *
1549 * Return: subtype of the EAPOL packet.
1550 */
1551 static inline enum qdf_proto_subtype
1552 __qdf_nbuf_data_get_exp_msg_type(uint8_t *data, uint8_t code)
1553 {
1554 uint8_t msg_type;
1555 uint8_t opcode = *(data + EAP_EXP_MSG_OPCODE_OFFSET);
1556
1557 switch (opcode) {
1558 case WSC_START:
1559 return QDF_PROTO_EAP_WSC_START;
1560 case WSC_ACK:
1561 return QDF_PROTO_EAP_WSC_ACK;
1562 case WSC_NACK:
1563 return QDF_PROTO_EAP_WSC_NACK;
1564 case WSC_MSG:
1565 msg_type = *(data + EAP_EXP_MSG_TYPE_OFFSET);
1566 switch (msg_type) {
1567 case EAP_EXP_TYPE_M1:
1568 return QDF_PROTO_EAP_M1;
1569 case EAP_EXP_TYPE_M2:
1570 return QDF_PROTO_EAP_M2;
1571 case EAP_EXP_TYPE_M3:
1572 return QDF_PROTO_EAP_M3;
1573 case EAP_EXP_TYPE_M4:
1574 return QDF_PROTO_EAP_M4;
1575 case EAP_EXP_TYPE_M5:
1576 return QDF_PROTO_EAP_M5;
1577 case EAP_EXP_TYPE_M6:
1578 return QDF_PROTO_EAP_M6;
1579 case EAP_EXP_TYPE_M7:
1580 return QDF_PROTO_EAP_M7;
1581 case EAP_EXP_TYPE_M8:
1582 return QDF_PROTO_EAP_M8;
1583 default:
1584 break;
1585 }
1586 break;
1587 case WSC_DONE:
1588 return QDF_PROTO_EAP_WSC_DONE;
1589 case WSC_FRAG_ACK:
1590 return QDF_PROTO_EAP_WSC_FRAG_ACK;
1591 default:
1592 break;
1593 }
1594 switch (code) {
1595 case QDF_EAP_REQUEST:
1596 return QDF_PROTO_EAP_REQUEST;
1597 case QDF_EAP_RESPONSE:
1598 return QDF_PROTO_EAP_RESPONSE;
1599 default:
1600 return QDF_PROTO_INVALID;
1601 }
1602 }
1603
1604 /**
1605 * __qdf_nbuf_data_get_eap_type() - Get EAP type
1606 * @data: Pointer to EAPOL packet data buffer
1607 * @code: EAP code
1608 *
1609 * Return: subtype of the EAPOL packet.
1610 */
1611 static inline enum qdf_proto_subtype
1612 __qdf_nbuf_data_get_eap_type(uint8_t *data, uint8_t code)
1613 {
1614 uint8_t type = *(data + EAP_TYPE_OFFSET);
1615
1616 switch (type) {
1617 case EAP_PACKET_TYPE_EXP:
1618 return __qdf_nbuf_data_get_exp_msg_type(data, code);
1619 case EAP_PACKET_TYPE_ID:
1620 switch (code) {
1621 case QDF_EAP_REQUEST:
1622 return QDF_PROTO_EAP_REQ_ID;
1623 case QDF_EAP_RESPONSE:
1624 return QDF_PROTO_EAP_RSP_ID;
1625 default:
1626 return QDF_PROTO_INVALID;
1627 }
1628 default:
1629 switch (code) {
1630 case QDF_EAP_REQUEST:
1631 return QDF_PROTO_EAP_REQUEST;
1632 case QDF_EAP_RESPONSE:
1633 return QDF_PROTO_EAP_RESPONSE;
1634 default:
1635 return QDF_PROTO_INVALID;
1636 }
1637 }
1638 }
1639
1640 /**
1641 * __qdf_nbuf_data_get_eap_code() - Get EAPOL code
1642 * @data: Pointer to EAPOL packet data buffer
1643 *
1644 * Return: subtype of the EAPOL packet.
1645 */
1646 static inline enum qdf_proto_subtype
1647 __qdf_nbuf_data_get_eap_code(uint8_t *data)
1648 {
1649 uint8_t code = *(data + EAP_CODE_OFFSET);
1650
1651 switch (code) {
1652 case QDF_EAP_REQUEST:
1653 case QDF_EAP_RESPONSE:
1654 return __qdf_nbuf_data_get_eap_type(data, code);
1655 case QDF_EAP_SUCCESS:
1656 return QDF_PROTO_EAP_SUCCESS;
1657 case QDF_EAP_FAILURE:
1658 return QDF_PROTO_EAP_FAILURE;
1659 case QDF_EAP_INITIATE:
1660 return QDF_PROTO_EAP_INITIATE;
1661 case QDF_EAP_FINISH:
1662 return QDF_PROTO_EAP_FINISH;
1663 default:
1664 return QDF_PROTO_INVALID;
1665 }
1666 }
1667
1668 enum qdf_proto_subtype
1669 __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1670 {
1671 uint8_t pkt_type = *(data + EAPOL_PACKET_TYPE_OFFSET);
1672
1673 switch (pkt_type) {
1674 case EAPOL_PACKET_TYPE_EAP:
1675 return __qdf_nbuf_data_get_eap_code(data);
1676 case EAPOL_PACKET_TYPE_START:
1677 return QDF_PROTO_EAPOL_START;
1678 case EAPOL_PACKET_TYPE_LOGOFF:
1679 return QDF_PROTO_EAPOL_LOGOFF;
1680 case EAPOL_PACKET_TYPE_KEY:
1681 return __qdf_nbuf_data_get_eapol_key(data);
1682 case EAPOL_PACKET_TYPE_ASF:
1683 return QDF_PROTO_EAPOL_ASF;
1684 default:
1685 return QDF_PROTO_INVALID;
1686 }
1687 }
1688
1689 qdf_export_symbol(__qdf_nbuf_data_get_eapol_subtype);
1690
1691 enum qdf_proto_subtype
1692 __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1693 {
1694 uint16_t subtype;
1695 enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1696
1697 subtype = (uint16_t)(*(uint16_t *)
1698 (data + ARP_SUB_TYPE_OFFSET));
1699
1700 switch (QDF_SWAP_U16(subtype)) {
1701 case ARP_REQUEST:
1702 proto_subtype = QDF_PROTO_ARP_REQ;
1703 break;
1704 case ARP_RESPONSE:
1705 proto_subtype = QDF_PROTO_ARP_RES;
1706 break;
1707 default:
1708 break;
1709 }
1710
1711 return proto_subtype;
1712 }
1713
1714 enum qdf_proto_subtype
1715 __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1716 {
1717 uint8_t subtype;
1718 enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1719
1720 subtype = (uint8_t)(*(uint8_t *)
1721 (data + ICMP_SUBTYPE_OFFSET));
1722
1723 switch (subtype) {
1724 case ICMP_REQUEST:
1725 proto_subtype = QDF_PROTO_ICMP_REQ;
1726 break;
1727 case ICMP_RESPONSE:
1728 proto_subtype = QDF_PROTO_ICMP_RES;
1729 break;
1730 default:
1731 break;
1732 }
1733
1734 return proto_subtype;
1735 }
1736
1737 enum qdf_proto_subtype
1738 __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1739 {
1740 uint8_t subtype;
1741 enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1742
1743 subtype = (uint8_t)(*(uint8_t *)
1744 (data + ICMPV6_SUBTYPE_OFFSET));
1745
1746 switch (subtype) {
1747 case ICMPV6_REQUEST:
1748 proto_subtype = QDF_PROTO_ICMPV6_REQ;
1749 break;
1750 case ICMPV6_RESPONSE:
1751 proto_subtype = QDF_PROTO_ICMPV6_RES;
1752 break;
1753 case ICMPV6_RS:
1754 proto_subtype = QDF_PROTO_ICMPV6_RS;
1755 break;
1756 case ICMPV6_RA:
1757 proto_subtype = QDF_PROTO_ICMPV6_RA;
1758 break;
1759 case ICMPV6_NS:
1760 proto_subtype = QDF_PROTO_ICMPV6_NS;
1761 break;
1762 case ICMPV6_NA:
1763 proto_subtype = QDF_PROTO_ICMPV6_NA;
1764 break;
1765 default:
1766 break;
1767 }
1768
1769 return proto_subtype;
1770 }
1771
1772 bool
1773 __qdf_nbuf_is_ipv4_last_fragment(struct sk_buff *skb)
1774 {
1775 if (((ntohs(ip_hdr(skb)->frag_off) & ~IP_OFFSET) & IP_MF) == 0)
1776 return true;
1777
1778 return false;
1779 }
1780
1781 bool
1782 __qdf_nbuf_is_ipv4_fragment(struct sk_buff *skb)
1783 {
1784 if (ntohs(ip_hdr(skb)->frag_off) & IP_MF)
1785 return true;
1786
1787 return false;
1788 }
1789
1790 void
1791 __qdf_nbuf_data_set_ipv4_tos(uint8_t *data, uint8_t tos)
1792 {
1793 *(uint8_t *)(data + QDF_NBUF_TRAC_IPV4_TOS_OFFSET) = tos;
1794 }
1795
1796 uint8_t
1797 __qdf_nbuf_data_get_ipv4_tos(uint8_t *data)
1798 {
1799 uint8_t tos;
1800
1801 tos = (uint8_t)(*(uint8_t *)(data +
1802 QDF_NBUF_TRAC_IPV4_TOS_OFFSET));
1803 return tos;
1804 }
1805
1806 uint8_t
1807 __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1808 {
1809 uint8_t proto_type;
1810
1811 proto_type = (uint8_t)(*(uint8_t *)(data +
1812 QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1813 return proto_type;
1814 }
1815
1816 uint8_t
1817 __qdf_nbuf_data_get_ipv6_tc(uint8_t *data)
1818 {
1819 struct ipv6hdr *hdr;
1820
1821 hdr = (struct ipv6hdr *)(data + QDF_NBUF_TRAC_IPV6_OFFSET);
1822 return ip6_tclass(ip6_flowinfo(hdr));
1823 }
1824
1825 void
1826 __qdf_nbuf_data_set_ipv6_tc(uint8_t *data, uint8_t tc)
1827 {
1828 struct ipv6hdr *hdr;
1829
1830 hdr = (struct ipv6hdr *)(data + QDF_NBUF_TRAC_IPV6_OFFSET);
1831 ip6_flow_hdr(hdr, tc, ip6_flowlabel(hdr));
1832 }
1833
1834 uint8_t
1835 __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1836 {
1837 uint8_t proto_type;
1838
1839 proto_type = (uint8_t)(*(uint8_t *)(data +
1840 QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1841 return proto_type;
1842 }
1843
1844 bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1845 {
1846 uint16_t ether_type;
1847
1848 ether_type = (uint16_t)(*(uint16_t *)(data +
1849 QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1850
1851 if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1852 return true;
1853 else
1854 return false;
1855 }
1856 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
1857
1858 bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1859 {
1860 uint16_t sport;
1861 uint16_t dport;
1862 uint8_t ipv4_offset;
1863 uint8_t ipv4_hdr_len;
1864 struct iphdr *iphdr;
1865
1866 if (__qdf_nbuf_get_ether_type(data) !=
1867 QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1868 return false;
1869
1870 ipv4_offset = __qdf_nbuf_get_ip_offset(data);
1871 iphdr = (struct iphdr *)(data + ipv4_offset);
1872 ipv4_hdr_len = iphdr->ihl * QDF_NBUF_IPV4_HDR_SIZE_UNIT;
1873
1874 sport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len);
1875 dport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len +
1876 sizeof(uint16_t));
1877
1878 if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1879 (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1880 ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1881 (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1882 return true;
1883 else
1884 return false;
1885 }
1886 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
1887
1888 /**
1889 * qdf_is_eapol_type() - check if packet is EAPOL
1890 * @type: Packet type
1891 *
1892 * This API checks whether the frame is an EAPOL packet.
1893 *
1894 * Return: true if it is EAPOL frame
1895 * false otherwise.
1896 */
1897 #ifdef BIG_ENDIAN_HOST
1898 static inline bool qdf_is_eapol_type(uint16_t type)
1899 {
1900 return (type == QDF_NBUF_TRAC_EAPOL_ETH_TYPE);
1901 }
1902 #else
1903 static inline bool qdf_is_eapol_type(uint16_t type)
1904 {
1905 return (type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE));
1906 }
1907 #endif
1908
1909 bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
1910 {
1911 uint16_t ether_type;
1912
1913 ether_type = __qdf_nbuf_get_ether_type(data);
1914
1915 return qdf_is_eapol_type(ether_type);
1916 }
1917 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
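/*
 * Usage sketch (illustrative only; "skb" and the priority helper are
 * hypothetical caller-side names):
 *
 *	if (__qdf_nbuf_data_is_ipv4_eapol_pkt(qdf_nbuf_data(skb)))
 *		give_frame_high_priority(skb);
 *
 * On little-endian hosts the Ethertype read from the frame is byte-swapped
 * before comparison against the EAPOL type, which is why the big- and
 * little-endian variants of qdf_is_eapol_type above differ only in the
 * QDF_SWAP_U16() wrapper.
 */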
1918
1919 bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1920 {
1921 uint16_t ether_type;
1922
1923 ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1924 QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1925
1926 if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1927 return true;
1928 else
1929 return false;
1930 }
1931 qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
1932
1933 /**
1934 * qdf_nbuf_is_ipv6_vlan_pkt() - check whether packet is vlan IPV6
1935 * @data: Pointer to network data buffer
1936 *
1937 * This API checks for an IPv6 packet carried inside a VLAN (802.1Q) header.
1938 *
1939 * Return: true if the packet is VLAN-tagged IPv6,
1940 * false otherwise.
1941 */
1942 static bool qdf_nbuf_is_ipv6_vlan_pkt(uint8_t *data)
1943 {
1944 uint16_t ether_type;
1945
1946 ether_type = *(uint16_t *)(data + QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1947
1948 if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q))) {
1949 ether_type = *(uint16_t *)(data +
1950 QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
1951
1952 if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
1953 return true;
1954 }
1955 return false;
1956 }
1957
1958 /**
1959 * qdf_nbuf_is_ipv4_vlan_pkt() - check whether packet is vlan IPV4
1960 * @data: Pointer to network data buffer
1961 *
1962 * This API checks for an IPv4 packet carried inside a VLAN (802.1Q) header.
1963 *
1964 * Return: true if the packet is VLAN-tagged IPv4,
1965 * false otherwise.
1966 */
1967 static bool qdf_nbuf_is_ipv4_vlan_pkt(uint8_t *data)
1968 {
1969 uint16_t ether_type;
1970
1971 ether_type = *(uint16_t *)(data + QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1972
1973 if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q))) {
1974 ether_type = *(uint16_t *)(data +
1975 QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
1976
1977 if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1978 return true;
1979 }
1980 return false;
1981 }
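/*
 * Layout reference (illustrative): a single-tagged 802.1Q frame begins
 *
 *	dst MAC (6) | src MAC (6) | 0x8100 (2) | TCI (2) | inner type (2) | ...
 *
 * so the outer Ethertype sits at byte offset 12 and the encapsulated
 * Ethertype at offset 16 (0x0800 for IPv4, 0x86DD for IPv6). The two
 * QDF_NBUF_TRAC_*_ETH_TYPE_OFFSET macros used above are expected to resolve
 * to those offsets; their concrete values are defined elsewhere.
 */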
1982
1983 bool __qdf_nbuf_data_is_ipv4_igmp_pkt(uint8_t *data)
1984 {
1985 uint8_t pkt_type;
1986
1987 if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1988 pkt_type = (uint8_t)(*(uint8_t *)(data +
1989 QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1990 goto is_igmp;
1991 }
1992
1993 if (qdf_nbuf_is_ipv4_vlan_pkt(data)) {
1994 pkt_type = (uint8_t)(*(uint8_t *)(
1995 data +
1996 QDF_NBUF_TRAC_VLAN_IPV4_PROTO_TYPE_OFFSET));
1997 goto is_igmp;
1998 }
1999
2000 return false;
2001 is_igmp:
2002 if (pkt_type == QDF_NBUF_TRAC_IGMP_TYPE)
2003 return true;
2004
2005 return false;
2006 }
2007
2008 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_igmp_pkt);
2009
2010 bool __qdf_nbuf_data_is_ipv6_igmp_pkt(uint8_t *data)
2011 {
2012 uint8_t pkt_type;
2013 uint8_t next_hdr;
2014
2015 if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2016 pkt_type = (uint8_t)(*(uint8_t *)(data +
2017 QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2018 next_hdr = (uint8_t)(*(uint8_t *)(
2019 data +
2020 QDF_NBUF_TRAC_IPV6_OFFSET +
2021 QDF_NBUF_TRAC_IPV6_HEADER_SIZE));
2022 goto is_mld;
2023 }
2024
2025 if (qdf_nbuf_is_ipv6_vlan_pkt(data)) {
2026 pkt_type = (uint8_t)(*(uint8_t *)(
2027 data +
2028 QDF_NBUF_TRAC_VLAN_IPV6_PROTO_TYPE_OFFSET));
2029 next_hdr = (uint8_t)(*(uint8_t *)(
2030 data +
2031 QDF_NBUF_TRAC_VLAN_IPV6_OFFSET +
2032 QDF_NBUF_TRAC_IPV6_HEADER_SIZE));
2033 goto is_mld;
2034 }
2035
2036 return false;
2037 is_mld:
2038 if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
2039 return true;
2040 if ((pkt_type == QDF_NBUF_TRAC_HOPOPTS_TYPE) &&
2041 (next_hdr == QDF_NBUF_TRAC_ICMPV6_TYPE))
2042 return true;
2043
2044 return false;
2045 }
2046
2047 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_igmp_pkt);
2048
2049 bool __qdf_nbuf_is_ipv4_igmp_leave_pkt(__qdf_nbuf_t buf)
2050 {
2051 qdf_ether_header_t *eh = NULL;
2052 uint16_t ether_type;
2053 uint8_t eth_hdr_size = sizeof(qdf_ether_header_t);
2054
2055 eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
2056 ether_type = eh->ether_type;
2057
2058 if (ether_type == htons(ETH_P_8021Q)) {
2059 struct vlan_ethhdr *veth =
2060 (struct vlan_ethhdr *)qdf_nbuf_data(buf);
2061 ether_type = veth->h_vlan_encapsulated_proto;
2062 eth_hdr_size = sizeof(struct vlan_ethhdr);
2063 }
2064
2065 if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE)) {
2066 struct iphdr *iph = NULL;
2067 struct igmphdr *ih = NULL;
2068
2069 iph = (struct iphdr *)(qdf_nbuf_data(buf) + eth_hdr_size);
2070 ih = (struct igmphdr *)((uint8_t *)iph + iph->ihl * 4);
2071 switch (ih->type) {
2072 case IGMP_HOST_LEAVE_MESSAGE:
2073 return true;
2074 case IGMPV3_HOST_MEMBERSHIP_REPORT:
2075 {
2076 struct igmpv3_report *ihv3 = (struct igmpv3_report *)ih;
2077 struct igmpv3_grec *grec = NULL;
2078 int num = 0;
2079 int i = 0;
2080 int len = 0;
2081 int type = 0;
2082
2083 num = ntohs(ihv3->ngrec);
2084 for (i = 0; i < num; i++) {
2085 grec = (void *)((uint8_t *)(ihv3->grec) + len);
2086 type = grec->grec_type;
2087 if ((type == IGMPV3_MODE_IS_INCLUDE) ||
2088 (type == IGMPV3_CHANGE_TO_INCLUDE))
2089 return true;
2090
2091 len += sizeof(struct igmpv3_grec);
2092 len += ntohs(grec->grec_nsrcs) * 4;
2093 }
2094 break;
2095 }
2096 default:
2097 break;
2098 }
2099 }
2100
2101 return false;
2102 }
2103
2104 qdf_export_symbol(__qdf_nbuf_is_ipv4_igmp_leave_pkt);
2105
2106 bool __qdf_nbuf_is_ipv6_igmp_leave_pkt(__qdf_nbuf_t buf)
2107 {
2108 qdf_ether_header_t *eh = NULL;
2109 uint16_t ether_type;
2110 uint8_t eth_hdr_size = sizeof(qdf_ether_header_t);
2111
2112 eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
2113 ether_type = eh->ether_type;
2114
2115 if (ether_type == htons(ETH_P_8021Q)) {
2116 struct vlan_ethhdr *veth =
2117 (struct vlan_ethhdr *)qdf_nbuf_data(buf);
2118 ether_type = veth->h_vlan_encapsulated_proto;
2119 eth_hdr_size = sizeof(struct vlan_ethhdr);
2120 }
2121
2122 if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE)) {
2123 struct ipv6hdr *ip6h = NULL;
2124 struct icmp6hdr *icmp6h = NULL;
2125 uint8_t nexthdr;
2126 uint16_t frag_off = 0;
2127 int offset;
2128 qdf_nbuf_t buf_copy = NULL;
2129
2130 ip6h = (struct ipv6hdr *)(qdf_nbuf_data(buf) + eth_hdr_size);
2131 if (ip6h->nexthdr != IPPROTO_HOPOPTS ||
2132 ip6h->payload_len == 0)
2133 return false;
2134
2135 buf_copy = qdf_nbuf_copy(buf);
2136 if (qdf_unlikely(!buf_copy))
2137 return false;
2138
2139 nexthdr = ip6h->nexthdr;
2140 offset = ipv6_skip_exthdr(buf_copy,
2141 eth_hdr_size + sizeof(*ip6h),
2142 &nexthdr,
2143 &frag_off);
2144 qdf_nbuf_free(buf_copy);
2145 if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
2146 return false;
2147
2148 icmp6h = (struct icmp6hdr *)(qdf_nbuf_data(buf) + offset);
2149
2150 switch (icmp6h->icmp6_type) {
2151 case ICMPV6_MGM_REDUCTION:
2152 return true;
2153 case ICMPV6_MLD2_REPORT:
2154 {
2155 struct mld2_report *mh = NULL;
2156 struct mld2_grec *grec = NULL;
2157 int num = 0;
2158 int i = 0;
2159 int len = 0;
2160 int type = -1;
2161
2162 mh = (struct mld2_report *)icmp6h;
2163 num = ntohs(mh->mld2r_ngrec);
2164 for (i = 0; i < num; i++) {
2165 grec = (void *)(((uint8_t *)mh->mld2r_grec) +
2166 len);
2167 type = grec->grec_type;
2168 if ((type == MLD2_MODE_IS_INCLUDE) ||
2169 (type == MLD2_CHANGE_TO_INCLUDE))
2170 return true;
2171 else if (type == MLD2_BLOCK_OLD_SOURCES)
2172 return true;
2173
2174 len += sizeof(struct mld2_grec);
2175 len += ntohs(grec->grec_nsrcs) *
2176 sizeof(struct in6_addr);
2177 }
2178 break;
2179 }
2180 default:
2181 break;
2182 }
2183 }
2184
2185 return false;
2186 }
2187
2188 qdf_export_symbol(__qdf_nbuf_is_ipv6_igmp_leave_pkt);
2189
2190 bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
2191 {
2192 uint16_t ether_type;
2193
2194 ether_type = *(uint16_t *)(skb->data +
2195 QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
2196
2197 if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
2198 return true;
2199 else
2200 return false;
2201 }
2202 qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
2203
2204 bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
2205 {
2206 uint16_t ether_type;
2207
2208 ether_type = __qdf_nbuf_get_ether_type(data);
2209
2210 if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
2211 return true;
2212 else
2213 return false;
2214 }
2215 qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
2216
2217 bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
2218 {
2219 uint16_t op_code;
2220
2221 op_code = (uint16_t)(*(uint16_t *)(data +
2222 QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
2223
2224 if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
2225 return true;
2226 return false;
2227 }
2228
2229 bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
2230 {
2231 uint16_t op_code;
2232
2233 op_code = (uint16_t)(*(uint16_t *)(data +
2234 QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
2235
2236 if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
2237 return true;
2238 return false;
2239 }
2240
2241 uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data)
2242 {
2243 uint32_t src_ip;
2244
2245 src_ip = (uint32_t)(*(uint32_t *)(data +
2246 QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
2247
2248 return src_ip;
2249 }
2250
2251 uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
2252 {
2253 uint32_t tgt_ip;
2254
2255 tgt_ip = (uint32_t)(*(uint32_t *)(data +
2256 QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
2257
2258 return tgt_ip;
2259 }
2260
2261 uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
2262 {
2263 uint8_t *domain_name;
2264
2265 domain_name = (uint8_t *)
2266 (data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
2267 return domain_name;
2268 }
2269
2270 bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
2271 {
2272 uint16_t op_code;
2273 uint16_t tgt_port;
2274
2275 tgt_port = (uint16_t)(*(uint16_t *)(data +
2276 QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
2277 /* Standard DNS queries always happen on destination port 53. */
2278 if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
2279 op_code = (uint16_t)(*(uint16_t *)(data +
2280 QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
2281 if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
2282 QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
2283 return true;
2284 }
2285 return false;
2286 }
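/*
 * Worked example (illustrative; the concrete QDF_NBUF_PKT_DNSOP_* values are
 * defined elsewhere): a typical standard query carries a DNS flags word of
 * 0x0100 (QR=0, OPCODE=0, RD=1), while a typical standard response carries
 * 0x8180 (QR=1, RD=1, RA=1). Masking the flags word with a bitmap covering
 * the QR and OPCODE bits separates the two cases, which is what this check
 * and the response check below rely on.
 */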
2287
2288 bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
2289 {
2290 uint16_t op_code;
2291 uint16_t src_port;
2292
2293 src_port = (uint16_t)(*(uint16_t *)(data +
2294 QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
2295 /* Standard DNS responses always come from source port 53. */
2296 if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
2297 op_code = (uint16_t)(*(uint16_t *)(data +
2298 QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
2299
2300 if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
2301 QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
2302 return true;
2303 }
2304 return false;
2305 }
2306
2307 bool __qdf_nbuf_data_is_tcp_fin(uint8_t *data)
2308 {
2309 uint8_t op_code;
2310
2311 op_code = (uint8_t)(*(uint8_t *)(data +
2312 QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2313
2314 if (op_code == QDF_NBUF_PKT_TCPOP_FIN)
2315 return true;
2316
2317 return false;
2318 }
2319
2320 bool __qdf_nbuf_data_is_tcp_fin_ack(uint8_t *data)
2321 {
2322 uint8_t op_code;
2323
2324 op_code = (uint8_t)(*(uint8_t *)(data +
2325 QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2326
2327 if (op_code == QDF_NBUF_PKT_TCPOP_FIN_ACK)
2328 return true;
2329
2330 return false;
2331 }
2332
2333 bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
2334 {
2335 uint8_t op_code;
2336
2337 op_code = (uint8_t)(*(uint8_t *)(data +
2338 QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2339
2340 if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
2341 return true;
2342 return false;
2343 }
2344
2345 bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
2346 {
2347 uint8_t op_code;
2348
2349 op_code = (uint8_t)(*(uint8_t *)(data +
2350 QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2351
2352 if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
2353 return true;
2354 return false;
2355 }
2356
2357 bool __qdf_nbuf_data_is_tcp_rst(uint8_t *data)
2358 {
2359 uint8_t op_code;
2360
2361 op_code = (uint8_t)(*(uint8_t *)(data +
2362 QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2363
2364 if (op_code == QDF_NBUF_PKT_TCPOP_RST)
2365 return true;
2366
2367 return false;
2368 }
2369
2370 bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
2371 {
2372 uint8_t op_code;
2373
2374 op_code = (uint8_t)(*(uint8_t *)(data +
2375 QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2376
2377 if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
2378 return true;
2379 return false;
2380 }
2381
2382 uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
2383 {
2384 uint16_t src_port;
2385
2386 src_port = (uint16_t)(*(uint16_t *)(data +
2387 QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
2388
2389 return src_port;
2390 }
2391
2392 uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
2393 {
2394 uint16_t tgt_port;
2395
2396 tgt_port = (uint16_t)(*(uint16_t *)(data +
2397 QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
2398
2399 return tgt_port;
2400 }
2401
2402 bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
2403 {
2404 uint8_t op_code;
2405
2406 op_code = (uint8_t)(*(uint8_t *)(data +
2407 QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2408
2409 if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
2410 return true;
2411 return false;
2412 }
2413
2414 bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
2415 {
2416 uint8_t op_code;
2417
2418 op_code = (uint8_t)(*(uint8_t *)(data +
2419 QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2420
2421 if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
2422 return true;
2423 return false;
2424 }
2425
2426 bool __qdf_nbuf_data_is_icmpv4_redirect(uint8_t *data)
2427 {
2428 uint8_t op_code;
2429
2430 op_code = (uint8_t)(*(uint8_t *)(data +
2431 QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2432
2433 if (op_code == QDF_NBUF_PKT_ICMPV4_REDIRECT)
2434 return true;
2435 return false;
2436 }
2437
2438 qdf_export_symbol(__qdf_nbuf_data_is_icmpv4_redirect);
2439
2440 bool __qdf_nbuf_data_is_icmpv6_redirect(uint8_t *data)
2441 {
2442 uint8_t subtype;
2443
2444 subtype = (uint8_t)(*(uint8_t *)(data + ICMPV6_SUBTYPE_OFFSET));
2445
2446 if (subtype == ICMPV6_REDIRECT)
2447 return true;
2448 return false;
2449 }
2450
2451 qdf_export_symbol(__qdf_nbuf_data_is_icmpv6_redirect);
2452
2453 uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
2454 {
2455 uint32_t src_ip;
2456
2457 src_ip = (uint32_t)(*(uint32_t *)(data +
2458 QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
2459
2460 return src_ip;
2461 }
2462
2463 uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
2464 {
2465 uint32_t tgt_ip;
2466
2467 tgt_ip = (uint32_t)(*(uint32_t *)(data +
2468 QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
2469
2470 return tgt_ip;
2471 }
2472
2473 bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
2474 {
2475 uint16_t ether_type;
2476
2477 ether_type = (uint16_t)(*(uint16_t *)(data +
2478 QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
2479
2480 if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
2481 return true;
2482 else
2483 return false;
2484 }
2485 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
2486
2487 bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
2488 {
2489 uint16_t sport;
2490 uint16_t dport;
2491 uint8_t ipv6_offset;
2492
2493 if (!__qdf_nbuf_data_is_ipv6_pkt(data))
2494 return false;
2495
2496 ipv6_offset = __qdf_nbuf_get_ip_offset(data);
2497 sport = *(uint16_t *)(data + ipv6_offset +
2498 QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
2499 dport = *(uint16_t *)(data + ipv6_offset +
2500 QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
2501 sizeof(uint16_t));
2502
2503 if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
2504 (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
2505 ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
2506 (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
2507 return true;
2508 else
2509 return false;
2510 }
2511 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
2512
2513 bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data)
2514 {
2515 uint16_t sport;
2516 uint16_t dport;
2517
2518 sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
2519 QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
2520 dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
2521 QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
2522 sizeof(uint16_t));
2523
2524 if (sport == QDF_SWAP_U16(QDF_NBUF_TRAC_MDNS_SRC_N_DST_PORT) &&
2525 dport == sport)
2526 return true;
2527 else
2528 return false;
2529 }
2530
2531 qdf_export_symbol(__qdf_nbuf_data_is_ipv6_mdns_pkt);
2532
2533 bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
2534 {
2535 if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2536 uint32_t *dst_addr =
2537 (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
2538
2539 /*
2540 * Check the most significant nibble of the IPv4 destination
2541 * address; a value of 0xE indicates a multicast address.
2542 */
2543 if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
2544 QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
2545 return true;
2546 else
2547 return false;
2548 } else
2549 return false;
2550 }
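/*
 * Example (illustrative): 239.255.255.250 (SSDP) starts with byte 0xEF;
 * its most significant nibble is 0xE, so it is treated as multicast, while
 * 192.168.1.1 starts with 0xC0 and is not. The concrete mask/match values
 * come from the QDF_NBUF_TRAC_IPV4_ADDR_* macros defined elsewhere.
 */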
2551
2552 bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
2553 {
2554 if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2555 uint16_t *dst_addr;
2556
2557 dst_addr = (uint16_t *)
2558 (data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
2559
2560 /*
2561 * Check the first two bytes of the IPv6 destination address;
2562 * if they equal 0xFF00 this is an IPv6 mcast packet.
2563 */
2564 if (*dst_addr ==
2565 QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
2566 return true;
2567 else
2568 return false;
2569 } else
2570 return false;
2571 }
2572
2573 bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
2574 {
2575 if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2576 uint8_t pkt_type;
2577
2578 pkt_type = (uint8_t)(*(uint8_t *)(data +
2579 QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2580
2581 if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
2582 return true;
2583 else
2584 return false;
2585 } else
2586 return false;
2587 }
2588
2589 qdf_export_symbol(__qdf_nbuf_data_is_icmp_pkt);
2590
2591 bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
2592 {
2593 if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2594 uint8_t pkt_type;
2595
2596 pkt_type = (uint8_t)(*(uint8_t *)(data +
2597 QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2598
2599 if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
2600 return true;
2601 else
2602 return false;
2603 } else
2604 return false;
2605 }
2606
2607 qdf_export_symbol(__qdf_nbuf_data_is_icmpv6_pkt);
2608
2609 bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
2610 {
2611 if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2612 uint8_t pkt_type;
2613
2614 pkt_type = (uint8_t)(*(uint8_t *)(data +
2615 QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2616
2617 if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2618 return true;
2619 else
2620 return false;
2621 } else
2622 return false;
2623 }
2624
2625 bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
2626 {
2627 if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2628 uint8_t pkt_type;
2629
2630 pkt_type = (uint8_t)(*(uint8_t *)(data +
2631 QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2632
2633 if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2634 return true;
2635 else
2636 return false;
2637 } else
2638 return false;
2639 }
2640
2641 bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2642 {
2643 if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2644 uint8_t pkt_type;
2645
2646 pkt_type = (uint8_t)(*(uint8_t *)(data +
2647 QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2648
2649 if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2650 return true;
2651 else
2652 return false;
2653 } else
2654 return false;
2655 }
2656
2657 bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2658 {
2659 if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2660 uint8_t pkt_type;
2661
2662 pkt_type = (uint8_t)(*(uint8_t *)(data +
2663 QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2664
2665 if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2666 return true;
2667 else
2668 return false;
2669 } else
2670 return false;
2671 }
2672
2673 bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2674 {
2675 struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2676 return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2677 }
2678 qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
2679
2680 bool __qdf_nbuf_is_mcast_replay(qdf_nbuf_t nbuf)
2681 {
2682 struct sk_buff *skb = (struct sk_buff *)nbuf;
2683 struct ethhdr *eth = eth_hdr(skb);
2684
2685 if (qdf_likely(skb->pkt_type != PACKET_MULTICAST))
2686 return false;
2687
2688 if (qdf_unlikely(ether_addr_equal(eth->h_source, skb->dev->dev_addr)))
2689 return true;
2690
2691 return false;
2692 }
2693
2694 bool __qdf_nbuf_is_arp_local(struct sk_buff *skb)
2695 {
2696 struct arphdr *arp;
2697 struct in_ifaddr **ifap = NULL;
2698 struct in_ifaddr *ifa = NULL;
2699 struct in_device *in_dev;
2700 unsigned char *arp_ptr;
2701 __be32 tip;
2702
2703 arp = (struct arphdr *)skb->data;
2704 if (arp->ar_op == htons(ARPOP_REQUEST)) {
2705 /* if fail to acquire rtnl lock, assume it's local arp */
2706 if (!rtnl_trylock())
2707 return true;
2708
2709 in_dev = __in_dev_get_rtnl(skb->dev);
2710 if (in_dev) {
2711 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
2712 ifap = &ifa->ifa_next) {
2713 if (!strcmp(skb->dev->name, ifa->ifa_label))
2714 break;
2715 }
2716 }
2717
2718 if (ifa && ifa->ifa_local) {
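/*
 * The ARP payload following struct arphdr is laid out as sender HW
 * address, sender IP (4 bytes), target HW address, target IP
 * (4 bytes); skip the first three fields to reach the target IP
 * being resolved.
 */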
2719 arp_ptr = (unsigned char *)(arp + 1);
2720 arp_ptr += (skb->dev->addr_len + 4 +
2721 skb->dev->addr_len);
2722 memcpy(&tip, arp_ptr, 4);
2723 qdf_debug("ARP packet: local IP: %x dest IP: %x",
2724 ifa->ifa_local, tip);
2725 if (ifa->ifa_local == tip) {
2726 rtnl_unlock();
2727 return true;
2728 }
2729 }
2730 rtnl_unlock();
2731 }
2732
2733 return false;
2734 }
2735
2736 /**
2737 * __qdf_nbuf_data_get_tcp_hdr_len() - get TCP header length
2738 * @data: pointer to data of network buffer
2739 * @tcp_hdr_len_offset: byte offset of the TCP header-length field within the packet
2740 *
2741 * Return: TCP header length in bytes
2742 */
2743 static inline
2744 uint8_t __qdf_nbuf_data_get_tcp_hdr_len(uint8_t *data,
2745 uint8_t tcp_hdr_len_offset)
2746 {
2747 uint8_t tcp_hdr_len;
2748
2749 tcp_hdr_len =
2750 *((uint8_t *)(data + tcp_hdr_len_offset));
2751
2752 tcp_hdr_len = ((tcp_hdr_len & QDF_NBUF_PKT_TCP_HDR_LEN_MASK) >>
2753 QDF_NBUF_PKT_TCP_HDR_LEN_LSB) *
2754 QDF_NBUF_PKT_TCP_HDR_LEN_UNIT;
2755
2756 return tcp_hdr_len;
2757 }
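/*
 * Worked example, assuming the usual TCP encoding (mask 0xf0, shift 4,
 * unit 4 bytes): a data-offset byte of 0x50 decodes to
 * ((0x50 & 0xf0) >> 4) * 4 = 20, i.e. a bare 20-byte TCP header, while
 * 0x80 decodes to 32, i.e. 20 bytes of header plus 12 bytes of options.
 * The concrete QDF_NBUF_PKT_TCP_HDR_LEN_* values are defined elsewhere.
 */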
2758
2759 bool __qdf_nbuf_is_ipv4_v6_pure_tcp_ack(struct sk_buff *skb)
2760 {
2761 bool is_tcp_ack = false;
2762 uint8_t op_code, tcp_hdr_len;
2763 uint16_t ip_payload_len;
2764 uint8_t *data = skb->data;
2765
2766 /*
2767 * If the packet length exceeds the TCP ACK max length, or the
2768 * skb is nonlinear, it cannot be a pure TCP ACK.
2769 */
2770 if (qdf_nbuf_len(skb) > QDF_NBUF_PKT_TCP_ACK_MAX_LEN ||
2771 qdf_nbuf_is_nonlinear(skb))
2772 return false;
2773
2774 if (qdf_nbuf_is_ipv4_tcp_pkt(skb)) {
2775 ip_payload_len =
2776 QDF_SWAP_U16(*((uint16_t *)(data +
2777 QDF_NBUF_TRAC_IPV4_TOTAL_LEN_OFFSET)))
2778 - QDF_NBUF_TRAC_IPV4_HEADER_SIZE;
2779
2780 tcp_hdr_len = __qdf_nbuf_data_get_tcp_hdr_len(
2781 data,
2782 QDF_NBUF_PKT_IPV4_TCP_HDR_LEN_OFFSET);
2783
2784 op_code = (uint8_t)(*(uint8_t *)(data +
2785 QDF_NBUF_PKT_IPV4_TCP_OPCODE_OFFSET));
2786
2787 if (ip_payload_len == tcp_hdr_len &&
2788 op_code == QDF_NBUF_PKT_TCPOP_ACK)
2789 is_tcp_ack = true;
2790
2791 } else if (qdf_nbuf_is_ipv6_tcp_pkt(skb)) {
2792 ip_payload_len =
2793 QDF_SWAP_U16(*((uint16_t *)(data +
2794 QDF_NBUF_TRAC_IPV6_PAYLOAD_LEN_OFFSET)));
2795
2796 tcp_hdr_len = __qdf_nbuf_data_get_tcp_hdr_len(
2797 data,
2798 QDF_NBUF_PKT_IPV6_TCP_HDR_LEN_OFFSET);
2799 op_code = (uint8_t)(*(uint8_t *)(data +
2800 QDF_NBUF_PKT_IPV6_TCP_OPCODE_OFFSET));
2801
2802 if (ip_payload_len == tcp_hdr_len &&
2803 op_code == QDF_NBUF_PKT_TCPOP_ACK)
2804 is_tcp_ack = true;
2805 }
2806
2807 return is_tcp_ack;
2808 }
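/*
 * Worked example (illustrative): an IPv4 segment with a total length of 52
 * bytes has an IP payload of 52 - 20 = 32 bytes. If the TCP header length
 * also decodes to 32 (20-byte header plus 12 bytes of options) and the
 * flags byte matches the ACK-only opcode, the segment carries no payload
 * and is reported as a pure TCP ACK; any payload makes ip_payload_len
 * exceed tcp_hdr_len and the check fails.
 */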
2809
2810 #ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
2811 bool qdf_nbuf_fast_xmit(qdf_nbuf_t nbuf)
2812 {
2813 return nbuf->fast_xmit;
2814 }
2815
2816 qdf_export_symbol(qdf_nbuf_fast_xmit);
2817
2818 void qdf_nbuf_set_fast_xmit(qdf_nbuf_t nbuf, int value)
2819 {
2820 nbuf->fast_xmit = value;
2821 }
2822
2823 qdf_export_symbol(qdf_nbuf_set_fast_xmit);
2824 #else
2825 bool qdf_nbuf_fast_xmit(qdf_nbuf_t nbuf)
2826 {
2827 return false;
2828 }
2829
2830 qdf_export_symbol(qdf_nbuf_fast_xmit);
2831
2832 void qdf_nbuf_set_fast_xmit(qdf_nbuf_t nbuf, int value)
2833 {
2834 }
2835
2836 qdf_export_symbol(qdf_nbuf_set_fast_xmit);
2837 #endif
2838
2839 #ifdef NBUF_MEMORY_DEBUG
2840
2841 static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
2842
2843 static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
2844 static struct kmem_cache *nbuf_tracking_cache;
2845 static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
2846 static spinlock_t qdf_net_buf_track_free_list_lock;
2847 static uint32_t qdf_net_buf_track_free_list_count;
2848 static uint32_t qdf_net_buf_track_used_list_count;
2849 static uint32_t qdf_net_buf_track_max_used;
2850 static uint32_t qdf_net_buf_track_max_free;
2851 static uint32_t qdf_net_buf_track_max_allocated;
2852 static uint32_t qdf_net_buf_track_fail_count;
2853
2854 /**
2855 * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2856 *
2857 * tracks the max number of network buffers that the wlan driver was tracking
2858 * at any one time.
2859 *
2860 * Return: none
2861 */
2862 static inline void update_max_used(void)
2863 {
2864 int sum;
2865
2866 if (qdf_net_buf_track_max_used <
2867 qdf_net_buf_track_used_list_count)
2868 qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2869 sum = qdf_net_buf_track_free_list_count +
2870 qdf_net_buf_track_used_list_count;
2871 if (qdf_net_buf_track_max_allocated < sum)
2872 qdf_net_buf_track_max_allocated = sum;
2873 }
2874
2875 /**
2876 * update_max_free() - update qdf_net_buf_track_max_free
2877 *
2878 * Tracks the maximum number of tracking buffers kept on the freelist.
2879 *
2880 * Return: none
2881 */
2882 static inline void update_max_free(void)
2883 {
2884 if (qdf_net_buf_track_max_free <
2885 qdf_net_buf_track_free_list_count)
2886 qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2887 }
2888
2889 /**
2890 * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2891 *
2892 * This function pulls from a freelist if possible and uses kmem_cache_alloc.
2893 * This function also adds flexibility to adjust the allocation and freelist
2894 * schemes.
2895 *
2896 * Return: a pointer to an unused QDF_NBUF_TRACK structure; it may not be zeroed.
2897 */
2898 static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2899 {
2900 int flags = GFP_KERNEL;
2901 unsigned long irq_flag;
2902 QDF_NBUF_TRACK *new_node = NULL;
2903
2904 spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2905 qdf_net_buf_track_used_list_count++;
2906 if (qdf_net_buf_track_free_list) {
2907 new_node = qdf_net_buf_track_free_list;
2908 qdf_net_buf_track_free_list =
2909 qdf_net_buf_track_free_list->p_next;
2910 qdf_net_buf_track_free_list_count--;
2911 }
2912 update_max_used();
2913 spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2914
2915 if (new_node)
2916 return new_node;
2917
2918 if (in_interrupt() || irqs_disabled() || in_atomic())
2919 flags = GFP_ATOMIC;
2920
2921 return kmem_cache_alloc(nbuf_tracking_cache, flags);
2922 }
2923
2924 /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2925 #define FREEQ_POOLSIZE 2048
2926
2927 /**
2928 * qdf_nbuf_track_free() - free the nbuf tracking cookie.
2929 * @node: nbuf tracking node
2930 *
2931 * Matches calls to qdf_nbuf_track_alloc.
2932 * Either frees the tracking cookie to kernel or an internal
2933 * freelist based on the size of the freelist.
2934 *
2935 * Return: none
2936 */
2937 static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2938 {
2939 unsigned long irq_flag;
2940
2941 if (!node)
2942 return;
2943
2944 /* Try to shrink the freelist if free_list_count > FREEQ_POOLSIZE, and
2945 * only shrink it if it is bigger than twice the number of
2946 * nbufs in use. If the driver is stalling in a consistent bursty
2947 * fashion, this will keep 3/4 of the allocations served from the free list
2948 * while also allowing the system to recover memory as less frantic
2949 * traffic occurs.
2950 */
2951
2952 spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2953
2954 qdf_net_buf_track_used_list_count--;
2955 if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2956 (qdf_net_buf_track_free_list_count >
2957 qdf_net_buf_track_used_list_count << 1)) {
2958 kmem_cache_free(nbuf_tracking_cache, node);
2959 } else {
2960 node->p_next = qdf_net_buf_track_free_list;
2961 qdf_net_buf_track_free_list = node;
2962 qdf_net_buf_track_free_list_count++;
2963 }
2964 update_max_free();
2965 spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2966 }
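/*
 * Numeric example of the shrink heuristic above: with FREEQ_POOLSIZE at
 * 2048, a freelist of 5000 nodes and 2000 nodes in use satisfies both
 * conditions (5000 > 2048 and 5000 > 2 * 2000), so the node is returned to
 * the kmem_cache; with 5000 nodes in use instead, the node would be kept on
 * the freelist to absorb the next burst.
 */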
2967
2968 /**
2969 * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2970 *
2971 * Removes a 'warmup time' characteristic of the freelist. Prefilling
2972 * the freelist first makes it performant for the first iperf udp burst
2973 * as well as steady state.
2974 *
2975 * Return: None
2976 */
2977 static void qdf_nbuf_track_prefill(void)
2978 {
2979 int i;
2980 QDF_NBUF_TRACK *node, *head;
2981
2982 /* prepopulate the freelist */
2983 head = NULL;
2984 for (i = 0; i < FREEQ_POOLSIZE; i++) {
2985 node = qdf_nbuf_track_alloc();
2986 if (!node)
2987 continue;
2988 node->p_next = head;
2989 head = node;
2990 }
2991 while (head) {
2992 node = head->p_next;
2993 qdf_nbuf_track_free(head);
2994 head = node;
2995 }
2996
2997 /* prefilled buffers should not count as used */
2998 qdf_net_buf_track_max_used = 0;
2999 }
3000
3001 /**
3002 * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies
3003 *
3004 * This initializes the memory manager for the nbuf tracking cookies. Because
3005 * these cookies are all the same size and only used in this feature, we can
3006 * use a kmem_cache to provide tracking as well as to speed up allocations.
3007 * To avoid the overhead of allocating and freeing the buffers (including SLUB
3008 * features) a freelist is prepopulated here.
3009 *
3010 * Return: None
3011 */
3012 static void qdf_nbuf_track_memory_manager_create(void)
3013 {
3014 spin_lock_init(&qdf_net_buf_track_free_list_lock);
3015 nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
3016 sizeof(QDF_NBUF_TRACK),
3017 0, 0, NULL);
3018
3019 qdf_nbuf_track_prefill();
3020 }
3021
3022 /**
3023 * qdf_nbuf_track_memory_manager_destroy() - manager for nbuf tracking cookies
3024 *
3025 * Empty the freelist and print out usage statistics when it is no longer
3026 * needed. Also the kmem_cache should be destroyed here so that it can warn if
3027 * any nbuf tracking cookies were leaked.
3028 *
3029 * Return: None
3030 */
3031 static void qdf_nbuf_track_memory_manager_destroy(void)
3032 {
3033 QDF_NBUF_TRACK *node, *tmp;
3034 unsigned long irq_flag;
3035
3036 spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
3037 node = qdf_net_buf_track_free_list;
3038
3039 if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
3040 qdf_print("%s: unexpectedly large max_used count %d",
3041 __func__, qdf_net_buf_track_max_used);
3042
3043 if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
3044 qdf_print("%s: %d unused trackers were allocated",
3045 __func__,
3046 qdf_net_buf_track_max_allocated -
3047 qdf_net_buf_track_max_used);
3048
3049 if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
3050 qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
3051 qdf_print("%s: check freelist shrinking functionality",
3052 __func__);
3053
3054 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
3055 "%s: %d residual freelist size",
3056 __func__, qdf_net_buf_track_free_list_count);
3057
3058 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
3059 "%s: %d max freelist size observed",
3060 __func__, qdf_net_buf_track_max_free);
3061
3062 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
3063 "%s: %d max buffers used observed",
3064 __func__, qdf_net_buf_track_max_used);
3065
3066 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
3067 "%s: %d max buffers allocated observed",
3068 __func__, qdf_net_buf_track_max_allocated);
3069
3070 while (node) {
3071 tmp = node;
3072 node = node->p_next;
3073 kmem_cache_free(nbuf_tracking_cache, tmp);
3074 qdf_net_buf_track_free_list_count--;
3075 }
3076
3077 if (qdf_net_buf_track_free_list_count != 0)
3078 qdf_info("%d unfreed tracking memory lost in freelist",
3079 qdf_net_buf_track_free_list_count);
3080
3081 if (qdf_net_buf_track_used_list_count != 0)
3082 qdf_info("%d unfreed tracking memory still in use",
3083 qdf_net_buf_track_used_list_count);
3084
3085 spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
3086 kmem_cache_destroy(nbuf_tracking_cache);
3087 qdf_net_buf_track_free_list = NULL;
3088 }
3089
3090 void qdf_net_buf_debug_init(void)
3091 {
3092 uint32_t i;
3093
3094 is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
3095
3096 if (is_initial_mem_debug_disabled)
3097 return;
3098
3099 qdf_atomic_set(&qdf_nbuf_history_index, -1);
3100
3101 qdf_nbuf_map_tracking_init();
3102 qdf_nbuf_smmu_map_tracking_init();
3103 qdf_nbuf_track_memory_manager_create();
3104
3105 for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
3106 gp_qdf_net_buf_track_tbl[i] = NULL;
3107 spin_lock_init(&g_qdf_net_buf_track_lock[i]);
3108 }
3109 }
3110 qdf_export_symbol(qdf_net_buf_debug_init);
3111
3112 void qdf_net_buf_debug_exit(void)
3113 {
3114 uint32_t i;
3115 uint32_t count = 0;
3116 unsigned long irq_flag;
3117 QDF_NBUF_TRACK *p_node;
3118 QDF_NBUF_TRACK *p_prev;
3119
3120 if (is_initial_mem_debug_disabled)
3121 return;
3122
3123 for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
3124 spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3125 p_node = gp_qdf_net_buf_track_tbl[i];
3126 while (p_node) {
3127 p_prev = p_node;
3128 p_node = p_node->p_next;
3129 count++;
3130 qdf_info("SKB buf memory Leak@ Func %s, @Line %d, size %zu, nbuf %pK",
3131 p_prev->func_name, p_prev->line_num,
3132 p_prev->size, p_prev->net_buf);
3133 qdf_info("SKB leak map %s, line %d, unmap %s line %d mapped=%d",
3134 p_prev->map_func_name,
3135 p_prev->map_line_num,
3136 p_prev->unmap_func_name,
3137 p_prev->unmap_line_num,
3138 p_prev->is_nbuf_mapped);
3139 qdf_nbuf_track_free(p_prev);
3140 }
3141 spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3142 }
3143
3144 qdf_nbuf_track_memory_manager_destroy();
3145 qdf_nbuf_map_tracking_deinit();
3146 qdf_nbuf_smmu_map_tracking_deinit();
3147
3148 #ifdef CONFIG_HALT_KMEMLEAK
3149 if (count) {
3150 qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
3151 QDF_BUG(0);
3152 }
3153 #endif
3154 }
3155 qdf_export_symbol(qdf_net_buf_debug_exit);
3156
3157 /**
3158 * qdf_net_buf_debug_hash() - hash network buffer pointer
3159 * @net_buf: network buffer
3160 *
3161 * Return: hash value
3162 */
3163 static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
3164 {
3165 uint32_t i;
3166
3167 i = (uint32_t) (((uintptr_t) net_buf) >> 4);
3168 i += (uint32_t) (((uintptr_t) net_buf) >> 14);
3169 i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
3170
3171 return i;
3172 }
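/*
 * Example (illustrative, relying on QDF_NET_BUF_TRACK_MAX_SIZE being a
 * power of two): the hash folds (ptr >> 4) + (ptr >> 14) and masks the sum
 * with (table size - 1), so buffers that differ only in their low-order
 * address bits still land in different buckets instead of all colliding.
 */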
3173
3174 /**
3175 * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
3176 * @net_buf: network buffer
3177 *
3178 * Return: If skb is found in hash table then return pointer to network buffer
3179 * else return %NULL
3180 */
3181 static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
3182 {
3183 uint32_t i;
3184 QDF_NBUF_TRACK *p_node;
3185
3186 i = qdf_net_buf_debug_hash(net_buf);
3187 p_node = gp_qdf_net_buf_track_tbl[i];
3188
3189 while (p_node) {
3190 if (p_node->net_buf == net_buf)
3191 return p_node;
3192 p_node = p_node->p_next;
3193 }
3194
3195 return NULL;
3196 }
3197
3198 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
3199 const char *func_name, uint32_t line_num)
3200 {
3201 uint32_t i;
3202 unsigned long irq_flag;
3203 QDF_NBUF_TRACK *p_node;
3204 QDF_NBUF_TRACK *new_node;
3205
3206 if (is_initial_mem_debug_disabled)
3207 return;
3208
3209 new_node = qdf_nbuf_track_alloc();
3210
3211 i = qdf_net_buf_debug_hash(net_buf);
3212 spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3213
3214 p_node = qdf_net_buf_debug_look_up(net_buf);
3215
3216 if (p_node) {
3217 qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
3218 p_node->net_buf, p_node->func_name, p_node->line_num,
3219 net_buf, func_name, line_num);
3220 qdf_nbuf_track_free(new_node);
3221 } else {
3222 p_node = new_node;
3223 if (p_node) {
3224 p_node->net_buf = net_buf;
3225 qdf_str_lcopy(p_node->func_name, func_name,
3226 QDF_MEM_FUNC_NAME_SIZE);
3227 p_node->line_num = line_num;
3228 p_node->is_nbuf_mapped = false;
3229 p_node->map_line_num = 0;
3230 p_node->unmap_line_num = 0;
3231 p_node->map_func_name[0] = '\0';
3232 p_node->unmap_func_name[0] = '\0';
3233 p_node->size = size;
3234 p_node->time = qdf_get_log_timestamp();
3235 qdf_net_buf_update_smmu_params(p_node);
3236 qdf_mem_skb_inc(size);
3237 p_node->p_next = gp_qdf_net_buf_track_tbl[i];
3238 gp_qdf_net_buf_track_tbl[i] = p_node;
3239 } else {
3240 qdf_net_buf_track_fail_count++;
3241 qdf_print(
3242 "Mem alloc failed ! Could not track skb from %s %d of size %zu",
3243 func_name, line_num, size);
3244 }
3245 }
3246
3247 spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3248 }
3249 qdf_export_symbol(qdf_net_buf_debug_add_node);
3250
3251 void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name,
3252 uint32_t line_num)
3253 {
3254 uint32_t i;
3255 unsigned long irq_flag;
3256 QDF_NBUF_TRACK *p_node;
3257
3258 if (is_initial_mem_debug_disabled)
3259 return;
3260
3261 i = qdf_net_buf_debug_hash(net_buf);
3262 spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3263
3264 p_node = qdf_net_buf_debug_look_up(net_buf);
3265
3266 if (p_node) {
3267 qdf_str_lcopy(p_node->func_name, kbasename(func_name),
3268 QDF_MEM_FUNC_NAME_SIZE);
3269 p_node->line_num = line_num;
3270 }
3271
3272 spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3273 }
3274
3275 qdf_export_symbol(qdf_net_buf_debug_update_node);
3276
3277 void qdf_net_buf_debug_update_map_node(qdf_nbuf_t net_buf,
3278 const char *func_name,
3279 uint32_t line_num)
3280 {
3281 uint32_t i;
3282 unsigned long irq_flag;
3283 QDF_NBUF_TRACK *p_node;
3284
3285 if (is_initial_mem_debug_disabled)
3286 return;
3287
3288 i = qdf_net_buf_debug_hash(net_buf);
3289 spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3290
3291 p_node = qdf_net_buf_debug_look_up(net_buf);
3292
3293 if (p_node) {
3294 qdf_str_lcopy(p_node->map_func_name, func_name,
3295 QDF_MEM_FUNC_NAME_SIZE);
3296 p_node->map_line_num = line_num;
3297 p_node->is_nbuf_mapped = true;
3298 }
3299 spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3300 }
3301
3302 #ifdef NBUF_SMMU_MAP_UNMAP_DEBUG
3303 void qdf_net_buf_debug_update_smmu_map_node(qdf_nbuf_t nbuf,
3304 unsigned long iova,
3305 unsigned long pa,
3306 const char *func,
3307 uint32_t line)
3308 {
3309 uint32_t i;
3310 unsigned long irq_flag;
3311 QDF_NBUF_TRACK *p_node;
3312
3313 if (is_initial_mem_debug_disabled)
3314 return;
3315
3316 i = qdf_net_buf_debug_hash(nbuf);
3317 spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3318
3319 p_node = qdf_net_buf_debug_look_up(nbuf);
3320
3321 if (p_node) {
3322 qdf_str_lcopy(p_node->smmu_map_func_name, func,
3323 QDF_MEM_FUNC_NAME_SIZE);
3324 p_node->smmu_map_line_num = line;
3325 p_node->is_nbuf_smmu_mapped = true;
3326 p_node->smmu_map_iova_addr = iova;
3327 p_node->smmu_map_pa_addr = pa;
3328 }
3329 spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3330 }
3331
3332 void qdf_net_buf_debug_update_smmu_unmap_node(qdf_nbuf_t nbuf,
3333 unsigned long iova,
3334 unsigned long pa,
3335 const char *func,
3336 uint32_t line)
3337 {
3338 uint32_t i;
3339 unsigned long irq_flag;
3340 QDF_NBUF_TRACK *p_node;
3341
3342 if (is_initial_mem_debug_disabled)
3343 return;
3344
3345 i = qdf_net_buf_debug_hash(nbuf);
3346 spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3347
3348 p_node = qdf_net_buf_debug_look_up(nbuf);
3349
3350 if (p_node) {
3351 qdf_str_lcopy(p_node->smmu_unmap_func_name, func,
3352 QDF_MEM_FUNC_NAME_SIZE);
3353 p_node->smmu_unmap_line_num = line;
3354 p_node->is_nbuf_smmu_mapped = false;
3355 p_node->smmu_unmap_iova_addr = iova;
3356 p_node->smmu_unmap_pa_addr = pa;
3357 }
3358 spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3359 }
3360 #endif
3361
3362 void qdf_net_buf_debug_update_unmap_node(qdf_nbuf_t net_buf,
3363 const char *func_name,
3364 uint32_t line_num)
3365 {
3366 uint32_t i;
3367 unsigned long irq_flag;
3368 QDF_NBUF_TRACK *p_node;
3369
3370 if (is_initial_mem_debug_disabled)
3371 return;
3372
3373 i = qdf_net_buf_debug_hash(net_buf);
3374 spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3375
3376 p_node = qdf_net_buf_debug_look_up(net_buf);
3377
3378 if (p_node) {
3379 qdf_str_lcopy(p_node->unmap_func_name, func_name,
3380 QDF_MEM_FUNC_NAME_SIZE);
3381 p_node->unmap_line_num = line_num;
3382 p_node->is_nbuf_mapped = false;
3383 }
3384 spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3385 }
3386
3387 void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
3388 {
3389 uint32_t i;
3390 QDF_NBUF_TRACK *p_head;
3391 QDF_NBUF_TRACK *p_node = NULL;
3392 unsigned long irq_flag;
3393 QDF_NBUF_TRACK *p_prev;
3394
3395 if (is_initial_mem_debug_disabled)
3396 return;
3397
3398 i = qdf_net_buf_debug_hash(net_buf);
3399 spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
3400
3401 p_head = gp_qdf_net_buf_track_tbl[i];
3402
3403 /* Unallocated SKB */
3404 if (!p_head)
3405 goto done;
3406
3407 p_node = p_head;
3408 /* Found at head of the table */
3409 if (p_head->net_buf == net_buf) {
3410 gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
3411 goto done;
3412 }
3413
3414 /* Search in collision list */
3415 while (p_node) {
3416 p_prev = p_node;
3417 p_node = p_node->p_next;
3418 if ((p_node) && (p_node->net_buf == net_buf)) {
3419 p_prev->p_next = p_node->p_next;
3420 break;
3421 }
3422 }
3423
3424 done:
3425 spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
3426
3427 if (p_node) {
3428 qdf_mem_skb_dec(p_node->size);
3429 qdf_nbuf_track_free(p_node);
3430 } else {
3431 if (qdf_net_buf_track_fail_count) {
3432 qdf_print("Untracked net_buf free: %pK with tracking failures count: %u",
3433 net_buf, qdf_net_buf_track_fail_count);
3434 } else
3435 QDF_MEMDEBUG_PANIC("Unallocated buffer ! Double free of net_buf %pK ?",
3436 net_buf);
3437 }
3438 }
3439 qdf_export_symbol(qdf_net_buf_debug_delete_node);
3440
3441 void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
3442 const char *func_name, uint32_t line_num)
3443 {
3444 qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
3445
3446 if (is_initial_mem_debug_disabled)
3447 return;
3448
3449 while (ext_list) {
3450 /*
3451 * Take care to add each buffer in the chain if this is a jumbo
3452 * packet connected using frag_list
3453 */
3454 qdf_nbuf_t next;
3455
3456 next = qdf_nbuf_queue_next(ext_list);
3457 qdf_net_buf_debug_add_node(ext_list, 0, func_name, line_num);
3458 ext_list = next;
3459 }
3460 qdf_net_buf_debug_add_node(net_buf, 0, func_name, line_num);
3461 }
3462 qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
3463
3464 void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
3465 {
3466 qdf_nbuf_t ext_list;
3467
3468 if (is_initial_mem_debug_disabled)
3469 return;
3470
3471 ext_list = qdf_nbuf_get_ext_list(net_buf);
3472 while (ext_list) {
3473 /*
3474 * Take care to free each buffer in the chain if this is a jumbo
3475 * packet connected using frag_list
3476 */
3477 qdf_nbuf_t next;
3478
3479 next = qdf_nbuf_queue_next(ext_list);
3480
3481 if (qdf_nbuf_get_users(ext_list) > 1) {
3482 ext_list = next;
3483 continue;
3484 }
3485
3486 qdf_net_buf_debug_delete_node(ext_list);
3487 ext_list = next;
3488 }
3489
3490 if (qdf_nbuf_get_users(net_buf) > 1)
3491 return;
3492
3493 qdf_net_buf_debug_delete_node(net_buf);
3494 }
3495 qdf_export_symbol(qdf_net_buf_debug_release_skb);
3496
3497 qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
3498 int reserve, int align, int prio,
3499 const char *func, uint32_t line)
3500 {
3501 qdf_nbuf_t nbuf;
3502
3503 if (is_initial_mem_debug_disabled)
3504 return __qdf_nbuf_alloc(osdev, size,
3505 reserve, align,
3506 prio, func, line);
3507
3508 nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line);
3509
3510 /* Store SKB in internal QDF tracking table */
3511 if (qdf_likely(nbuf)) {
3512 qdf_net_buf_debug_add_node(nbuf, size, func, line);
3513 qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
3514 } else {
3515 qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
3516 }
3517
3518 return nbuf;
3519 }
3520 qdf_export_symbol(qdf_nbuf_alloc_debug);
3521
3522 qdf_nbuf_t qdf_nbuf_frag_alloc_debug(qdf_device_t osdev, qdf_size_t size,
3523 int reserve, int align, int prio,
3524 const char *func, uint32_t line)
3525 {
3526 qdf_nbuf_t nbuf;
3527
3528 if (is_initial_mem_debug_disabled)
3529 return __qdf_nbuf_frag_alloc(osdev, size,
3530 reserve, align,
3531 prio, func, line);
3532
3533 nbuf = __qdf_nbuf_frag_alloc(osdev, size, reserve, align, prio,
3534 func, line);
3535
3536 /* Store SKB in internal QDF tracking table */
3537 if (qdf_likely(nbuf)) {
3538 qdf_net_buf_debug_add_node(nbuf, size, func, line);
3539 qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
3540 } else {
3541 qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
3542 }
3543
3544 return nbuf;
3545 }
3546
3547 qdf_export_symbol(qdf_nbuf_frag_alloc_debug);
3548
3549 qdf_nbuf_t qdf_nbuf_alloc_no_recycler_debug(size_t size, int reserve, int align,
3550 const char *func, uint32_t line)
3551 {
3552 qdf_nbuf_t nbuf;
3553
3554 if (is_initial_mem_debug_disabled)
3555 return __qdf_nbuf_alloc_no_recycler(size, reserve, align, func,
3556 line);
3557
3558 nbuf = __qdf_nbuf_alloc_no_recycler(size, reserve, align, func, line);
3559
3560 /* Store SKB in internal QDF tracking table */
3561 if (qdf_likely(nbuf)) {
3562 qdf_net_buf_debug_add_node(nbuf, size, func, line);
3563 qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
3564 } else {
3565 qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
3566 }
3567
3568 return nbuf;
3569 }
3570
3571 qdf_export_symbol(qdf_nbuf_alloc_no_recycler_debug);
3572
3573 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line)
3574 {
3575 qdf_nbuf_t ext_list;
3576 qdf_frag_t p_frag;
3577 uint32_t num_nr_frags;
3578 uint32_t idx = 0;
3579
3580 if (qdf_unlikely(!nbuf))
3581 return;
3582
3583 if (is_initial_mem_debug_disabled)
3584 goto free_buf;
3585
3586 if (qdf_nbuf_get_users(nbuf) > 1)
3587 goto free_buf;
3588
3589 /* Remove SKB from internal QDF tracking table */
3590 qdf_nbuf_panic_on_free_if_smmu_mapped(nbuf, func, line);
3591 qdf_nbuf_panic_on_free_if_mapped(nbuf, func, line);
3592 qdf_net_buf_debug_delete_node(nbuf);
3593 qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_FREE);
3594
3595 /* Take care to delete the debug entries for frags */
3596 num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
3597
3598 qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3599
3600 while (idx < num_nr_frags) {
3601 p_frag = qdf_nbuf_get_frag_addr(nbuf, idx);
3602 if (qdf_likely(p_frag))
3603 qdf_frag_debug_refcount_dec(p_frag, func, line);
3604 idx++;
3605 }
3606
3607 /*
3608 * Take care to update the debug entries for frag_list and also
3609 * for the frags attached to frag_list
3610 */
3611 ext_list = qdf_nbuf_get_ext_list(nbuf);
3612 while (ext_list) {
3613 if (qdf_nbuf_get_users(ext_list) == 1) {
3614 qdf_nbuf_panic_on_free_if_smmu_mapped(ext_list, func,
3615 line);
3616 qdf_nbuf_panic_on_free_if_mapped(ext_list, func, line);
3617 idx = 0;
3618 num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
3619 qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3620 while (idx < num_nr_frags) {
3621 p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
3622 if (qdf_likely(p_frag))
3623 qdf_frag_debug_refcount_dec(p_frag,
3624 func, line);
3625 idx++;
3626 }
3627 qdf_net_buf_debug_delete_node(ext_list);
3628 }
3629
3630 ext_list = qdf_nbuf_queue_next(ext_list);
3631 }
3632
3633 free_buf:
3634 __qdf_nbuf_free(nbuf);
3635 }
3636 qdf_export_symbol(qdf_nbuf_free_debug);
3637
3638 struct sk_buff *__qdf_nbuf_alloc_simple(qdf_device_t osdev, size_t size,
3639 const char *func, uint32_t line)
3640 {
3641 struct sk_buff *skb;
3642 int flags = GFP_KERNEL;
3643
3644 if (in_interrupt() || irqs_disabled() || in_atomic()) {
3645 flags = GFP_ATOMIC;
3646 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
3647 /*
3648 * Observed that kcompactd burns CPU trying to make order-3 pages.
3649 * __netdev_alloc_skb has a 4k page fallback option in case the
3650 * high-order page allocation fails, so we don't need to push too
3651 * hard. Let kcompactd rest in peace.
3652 */
3653 flags = flags & ~__GFP_KSWAPD_RECLAIM;
3654 #endif
3655 }
3656
3657 skb = __netdev_alloc_skb(NULL, size, flags);
3658
3659
3660 if (qdf_likely(is_initial_mem_debug_disabled)) {
3661 if (qdf_likely(skb))
3662 qdf_nbuf_count_inc(skb);
3663 } else {
3664 if (qdf_likely(skb)) {
3665 qdf_nbuf_count_inc(skb);
3666 qdf_net_buf_debug_add_node(skb, size, func, line);
3667 qdf_nbuf_history_add(skb, func, line, QDF_NBUF_ALLOC);
3668 } else {
3669 qdf_nbuf_history_add(skb, func, line, QDF_NBUF_ALLOC_FAILURE);
3670 }
3671 }
3672
3673
3674 return skb;
3675 }
3676
3677 qdf_export_symbol(__qdf_nbuf_alloc_simple);
3678
3679 void qdf_nbuf_free_debug_simple(qdf_nbuf_t nbuf, const char *func,
3680 uint32_t line)
3681 {
3682 if (qdf_likely(nbuf)) {
3683 if (is_initial_mem_debug_disabled) {
3684 dev_kfree_skb_any(nbuf);
3685 } else {
3686 qdf_nbuf_free_debug(nbuf, func, line);
3687 }
3688 }
3689 }
3690
3691 qdf_export_symbol(qdf_nbuf_free_debug_simple);
3692
3693 qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
3694 {
3695 uint32_t num_nr_frags;
3696 uint32_t idx = 0;
3697 qdf_nbuf_t ext_list;
3698 qdf_frag_t p_frag;
3699
3700 qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf);
3701
3702 if (is_initial_mem_debug_disabled)
3703 return cloned_buf;
3704
3705 if (qdf_unlikely(!cloned_buf))
3706 return NULL;
3707
3708 /* Take care to update the debug entries for frags */
3709 num_nr_frags = qdf_nbuf_get_nr_frags(cloned_buf);
3710
3711 qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3712
3713 while (idx < num_nr_frags) {
3714 p_frag = qdf_nbuf_get_frag_addr(cloned_buf, idx);
3715 if (qdf_likely(p_frag))
3716 qdf_frag_debug_refcount_inc(p_frag, func, line);
3717 idx++;
3718 }
3719
3720 /* Take care to update debug entries for frags attached to frag_list */
3721 ext_list = qdf_nbuf_get_ext_list(cloned_buf);
3722 while (ext_list) {
3723 idx = 0;
3724 num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
3725
3726 qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
3727
3728 while (idx < num_nr_frags) {
3729 p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
3730 if (qdf_likely(p_frag))
3731 qdf_frag_debug_refcount_inc(p_frag, func, line);
3732 idx++;
3733 }
3734 ext_list = qdf_nbuf_queue_next(ext_list);
3735 }
3736
3737 /* Store SKB in internal QDF tracking table */
3738 qdf_net_buf_debug_add_node(cloned_buf, 0, func, line);
3739 qdf_nbuf_history_add(cloned_buf, func, line, QDF_NBUF_ALLOC_CLONE);
3740
3741 return cloned_buf;
3742 }
3743 qdf_export_symbol(qdf_nbuf_clone_debug);
3744
3745 qdf_nbuf_t
3746 qdf_nbuf_page_frag_alloc_debug(qdf_device_t osdev, qdf_size_t size, int reserve,
3747 int align, __qdf_frag_cache_t *pf_cache,
3748 const char *func, uint32_t line)
3749 {
3750 qdf_nbuf_t nbuf;
3751
3752 if (is_initial_mem_debug_disabled)
3753 return __qdf_nbuf_page_frag_alloc(osdev, size, reserve, align,
3754 pf_cache, func, line);
3755
3756 nbuf = __qdf_nbuf_page_frag_alloc(osdev, size, reserve, align,
3757 pf_cache, func, line);
3758
3759 /* Store SKB in internal QDF tracking table */
3760 if (qdf_likely(nbuf)) {
3761 qdf_net_buf_debug_add_node(nbuf, size, func, line);
3762 qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
3763 } else {
3764 qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
3765 }
3766
3767 return nbuf;
3768 }
3769
3770 qdf_export_symbol(qdf_nbuf_page_frag_alloc_debug);
3771
3772 qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
3773 {
3774 qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf);
3775
3776 if (is_initial_mem_debug_disabled)
3777 return copied_buf;
3778
3779 if (qdf_unlikely(!copied_buf))
3780 return NULL;
3781
3782 /* Store SKB in internal QDF tracking table */
3783 qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
3784 qdf_nbuf_history_add(copied_buf, func, line, QDF_NBUF_ALLOC_COPY);
3785
3786 return copied_buf;
3787 }
3788 qdf_export_symbol(qdf_nbuf_copy_debug);
3789
3790 qdf_nbuf_t
3791 qdf_nbuf_copy_expand_debug(qdf_nbuf_t buf, int headroom, int tailroom,
3792 const char *func, uint32_t line)
3793 {
3794 qdf_nbuf_t copied_buf = __qdf_nbuf_copy_expand(buf, headroom, tailroom);
3795
3796 if (qdf_unlikely(!copied_buf))
3797 return NULL;
3798
3799 if (is_initial_mem_debug_disabled)
3800 return copied_buf;
3801
3802 /* Store SKB in internal QDF tracking table */
3803 qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
3804 qdf_nbuf_history_add(copied_buf, func, line,
3805 QDF_NBUF_ALLOC_COPY_EXPAND);
3806
3807 return copied_buf;
3808 }
3809
3810 qdf_export_symbol(qdf_nbuf_copy_expand_debug);
3811
3812 qdf_nbuf_t
3813 qdf_nbuf_unshare_debug(qdf_nbuf_t buf, const char *func_name,
3814 uint32_t line_num)
3815 {
3816 qdf_nbuf_t unshared_buf;
3817 qdf_frag_t p_frag;
3818 uint32_t num_nr_frags;
3819 uint32_t idx = 0;
3820 qdf_nbuf_t ext_list, next;
3821
3822 if (is_initial_mem_debug_disabled)
3823 return __qdf_nbuf_unshare(buf);
3824
3825 /* Not a shared buffer, nothing to do */
3826 if (!qdf_nbuf_is_cloned(buf))
3827 return buf;
3828
3829 if (qdf_nbuf_get_users(buf) > 1)
3830 goto unshare_buf;
3831
3832 /* Take care to delete the debug entries for frags */
3833 num_nr_frags = qdf_nbuf_get_nr_frags(buf);
3834
3835 while (idx < num_nr_frags) {
3836 p_frag = qdf_nbuf_get_frag_addr(buf, idx);
3837 if (qdf_likely(p_frag))
3838 qdf_frag_debug_refcount_dec(p_frag, func_name,
3839 line_num);
3840 idx++;
3841 }
3842
3843 qdf_net_buf_debug_delete_node(buf);
3844
3845 /* Take care of jumbo packet connected using frag_list and frags */
3846 ext_list = qdf_nbuf_get_ext_list(buf);
3847 while (ext_list) {
3848 idx = 0;
3849 next = qdf_nbuf_queue_next(ext_list);
3850 num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
3851
3852 if (qdf_nbuf_get_users(ext_list) > 1) {
3853 ext_list = next;
3854 continue;
3855 }
3856
3857 while (idx < num_nr_frags) {
3858 p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
3859 if (qdf_likely(p_frag))
3860 qdf_frag_debug_refcount_dec(p_frag, func_name,
3861 line_num);
3862 idx++;
3863 }
3864
3865 qdf_net_buf_debug_delete_node(ext_list);
3866 ext_list = next;
3867 }
3868
3869 unshare_buf:
3870 unshared_buf = __qdf_nbuf_unshare(buf);
3871
3872 if (qdf_likely(unshared_buf))
3873 qdf_net_buf_debug_add_node(unshared_buf, 0, func_name,
3874 line_num);
3875
3876 return unshared_buf;
3877 }
3878
3879 qdf_export_symbol(qdf_nbuf_unshare_debug);
3880
3881 void
3882 qdf_nbuf_dev_kfree_list_debug(__qdf_nbuf_queue_head_t *nbuf_queue_head,
3883 const char *func, uint32_t line)
3884 {
3885 qdf_nbuf_t buf;
3886
3887 if (qdf_nbuf_queue_empty(nbuf_queue_head))
3888 return;
3889
3890 if (is_initial_mem_debug_disabled)
3891 return __qdf_nbuf_dev_kfree_list(nbuf_queue_head);
3892
3893 while ((buf = qdf_nbuf_queue_head_dequeue(nbuf_queue_head)) != NULL)
3894 qdf_nbuf_free_debug(buf, func, line);
3895 }
3896
3897 qdf_export_symbol(qdf_nbuf_dev_kfree_list_debug);
3898 #endif /* NBUF_MEMORY_DEBUG */
3899
3900 #if defined(QCA_DP_NBUF_FAST_PPEDS)
3901 #if defined(NBUF_MEMORY_DEBUG)
3902 struct sk_buff *__qdf_nbuf_alloc_ppe_ds(qdf_device_t osdev, size_t size,
3903 const char *func, uint32_t line)
3904 {
3905 struct sk_buff *skb;
3906 int flags = GFP_KERNEL;
3907
3908 if (in_interrupt() || irqs_disabled() || in_atomic()) {
3909 flags = GFP_ATOMIC;
3910 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
3911 /*
3912 * Observed that kcompactd burns CPU trying to assemble
3913 * order-3 pages. __netdev_alloc_skb has a 4k page
3914 * fallback in case the high-order page allocation
3915 * fails, so there is no need to push hard here.
3916 * Let kcompactd rest in peace.
3917 */
3918 flags = flags & ~__GFP_KSWAPD_RECLAIM;
3919 #endif
3920 }
3921 skb = __netdev_alloc_skb_no_skb_reset(NULL, size, flags);
3922 if (qdf_likely(is_initial_mem_debug_disabled)) {
3923 if (qdf_likely(skb))
3924 qdf_nbuf_count_inc(skb);
3925 } else {
3926 if (qdf_likely(skb)) {
3927 qdf_nbuf_count_inc(skb);
3928 qdf_net_buf_debug_add_node(skb, size, func, line);
3929 qdf_nbuf_history_add(skb, func, line,
3930 QDF_NBUF_ALLOC);
3931 } else {
3932 qdf_nbuf_history_add(skb, func, line,
3933 QDF_NBUF_ALLOC_FAILURE);
3934 }
3935 }
3936 return skb;
3937 }
3938 #else
3939 struct sk_buff *__qdf_nbuf_alloc_ppe_ds(qdf_device_t osdev, size_t size,
3940 const char *func, uint32_t line)
3941 {
3942 struct sk_buff *skb;
3943 int flags = GFP_KERNEL;
3944
3945 if (in_interrupt() || irqs_disabled() || in_atomic()) {
3946 flags = GFP_ATOMIC;
3947 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
3948 /*
3949 * Observed that kcompactd burns CPU trying to assemble
3950 * order-3 pages. __netdev_alloc_skb has a 4k page
3951 * fallback in case the high-order page allocation
3952 * fails, so there is no need to push hard here.
3953 * Let kcompactd rest in peace.
3954 */
3955 flags = flags & ~__GFP_KSWAPD_RECLAIM;
3956 #endif
3957 }
3958 skb = __netdev_alloc_skb_no_skb_reset(NULL, size, flags);
3959 if (qdf_likely(skb))
3960 qdf_nbuf_count_inc(skb);
3961
3962 return skb;
3963 }
3964 #endif
3965 qdf_export_symbol(__qdf_nbuf_alloc_ppe_ds);
3966 #endif
3967
3968 #if defined(FEATURE_TSO)
3969
3970 /**
3971 * struct qdf_tso_cmn_seg_info_t - TSO common info structure
3972 *
3973 * @ethproto: ethernet type of the msdu
3974 * @ip_tcp_hdr_len: ip + tcp length for the msdu
3975 * @l2_len: L2 length for the msdu
3976 * @eit_hdr: pointer to EIT header
3977 * @eit_hdr_len: EIT header length for the msdu
3978 * @eit_hdr_dma_map_addr: dma addr for EIT header
3979 * @tcphdr: pointer to tcp header
3980 * @ipv4_csum_en: ipv4 checksum enable
3981 * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
3982 * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
3983 * @ip_id: IP id
3984 * @tcp_seq_num: TCP sequence number
3985 *
3986 * This structure holds the TSO common info that is common
3987 * across all the TCP segments of the jumbo packet.
3988 */
3989 struct qdf_tso_cmn_seg_info_t {
3990 uint16_t ethproto;
3991 uint16_t ip_tcp_hdr_len;
3992 uint16_t l2_len;
3993 uint8_t *eit_hdr;
3994 uint32_t eit_hdr_len;
3995 qdf_dma_addr_t eit_hdr_dma_map_addr;
3996 struct tcphdr *tcphdr;
3997 uint16_t ipv4_csum_en;
3998 uint16_t tcp_ipv4_csum_en;
3999 uint16_t tcp_ipv6_csum_en;
4000 uint16_t ip_id;
4001 uint32_t tcp_seq_num;
4002 };
4003
4004 /**
4005 * qdf_nbuf_adj_tso_frag() - adjustment for buffer address of tso fragment
4006 * @skb: network buffer
4007 *
4008 * Return: byte offset applied so that the EIT header ends on an 8-byte boundary, or 0.
4009 */
4010 #ifdef FIX_TXDMA_LIMITATION
4011 static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
4012 {
4013 uint32_t eit_hdr_len;
4014 uint8_t *eit_hdr;
4015 uint8_t byte_8_align_offset;
4016
4017 eit_hdr = skb->data;
4018 eit_hdr_len = (skb_transport_header(skb)
4019 - skb_mac_header(skb)) + tcp_hdrlen(skb);
4020 byte_8_align_offset = ((unsigned long)(eit_hdr) + eit_hdr_len) & 0x7L;
4021 if (qdf_unlikely(byte_8_align_offset)) {
4022 TSO_DEBUG("%pK,Len %d %d",
4023 eit_hdr, eit_hdr_len, byte_8_align_offset);
4024 if (unlikely(skb_headroom(skb) < byte_8_align_offset)) {
4025 TSO_DEBUG("[%d]Insufficient headroom,[%pK],[%pK],[%d]",
4026 __LINE__, skb->head, skb->data,
4027 byte_8_align_offset);
4028 return 0;
4029 }
4030 qdf_nbuf_push_head(skb, byte_8_align_offset);
4031 qdf_mem_move(skb->data,
4032 skb->data + byte_8_align_offset,
4033 eit_hdr_len);
4034 skb->len -= byte_8_align_offset;
4035 skb->mac_header -= byte_8_align_offset;
4036 skb->network_header -= byte_8_align_offset;
4037 skb->transport_header -= byte_8_align_offset;
4038 }
4039 return byte_8_align_offset;
4040 }
4041 #else
4042 static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
4043 {
4044 return 0;
4045 }
4046 #endif
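/*
 * Illustrative example (not part of the driver) of the alignment math used by
 * qdf_nbuf_adj_tso_frag() when FIX_TXDMA_LIMITATION is enabled; the addresses
 * below are hypothetical:
 *
 *   eit_hdr (skb->data) = 0x1003
 *   eit_hdr_len         = 54  (14B ethernet + 20B IP + 20B TCP)
 *   header end          = 0x1039, and 0x1039 & 0x7 = 1
 *
 * byte_8_align_offset is therefore 1, so the headers are pushed down by one
 * byte (provided there is at least one byte of headroom), which makes the end
 * of the DMA-mapped EIT header 8-byte aligned. The payload itself is not
 * moved; __qdf_nbuf_get_tso_info() adds the same offset back when computing
 * tso_frag_vaddr.
 */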
4047
4048 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
4049 void qdf_record_nbuf_nbytes(
4050 uint32_t nbytes, qdf_dma_dir_t dir, bool is_mapped)
4051 {
4052 __qdf_record_nbuf_nbytes(nbytes, dir, is_mapped);
4053 }
4054
4055 qdf_export_symbol(qdf_record_nbuf_nbytes);
4056
4057 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
4058
4059 /**
4060 * qdf_nbuf_tso_map_frag() - Map TSO segment
4061 * @osdev: qdf device handle
4062 * @tso_frag_vaddr: addr of tso fragment
4063 * @nbytes: number of bytes
4064 * @dir: direction
4065 *
4066 * Map the TSO segment and, for MCL, record the amount of memory mapped
4067 *
4068 * Return: DMA address of the mapped TSO fragment on success and
4069 * 0 in case of DMA mapping failure
4070 */
4071 static inline qdf_dma_addr_t qdf_nbuf_tso_map_frag(
4072 qdf_device_t osdev, void *tso_frag_vaddr,
4073 uint32_t nbytes, qdf_dma_dir_t dir)
4074 {
4075 qdf_dma_addr_t tso_frag_paddr = 0;
4076
4077 tso_frag_paddr = dma_map_single(osdev->dev, tso_frag_vaddr,
4078 nbytes, __qdf_dma_dir_to_os(dir));
4079 if (unlikely(dma_mapping_error(osdev->dev, tso_frag_paddr))) {
4080 qdf_err("DMA mapping error!");
4081 qdf_assert_always(0);
4082 return 0;
4083 }
4084 qdf_record_nbuf_nbytes(nbytes, dir, true);
4085 return tso_frag_paddr;
4086 }
4087
4088 /**
4089 * qdf_nbuf_tso_unmap_frag() - Unmap TSO segment
4090 * @osdev: qdf device handle
4091 * @tso_frag_paddr: DMA addr of tso fragment
4092 * @dir: direction
4093 * @nbytes: number of bytes
4094 *
4095 * Unmap the TSO segment and, for MCL, record the amount of memory unmapped
4096 *
4097 * Return: None
4098 */
4099 static inline void qdf_nbuf_tso_unmap_frag(
4100 qdf_device_t osdev, qdf_dma_addr_t tso_frag_paddr,
4101 uint32_t nbytes, qdf_dma_dir_t dir)
4102 {
4103 qdf_record_nbuf_nbytes(nbytes, dir, false);
4104 dma_unmap_single(osdev->dev, tso_frag_paddr,
4105 nbytes, __qdf_dma_dir_to_os(dir));
4106 }
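/*
 * Illustrative sketch (not compiled) of how the two helpers above are meant
 * to be paired; frag_vaddr and frag_len below are hypothetical:
 *
 *   paddr = qdf_nbuf_tso_map_frag(osdev, frag_vaddr, frag_len,
 *                                 QDF_DMA_TO_DEVICE);
 *   if (!paddr)
 *           return QDF_STATUS_E_FAULT;
 *   ...hand paddr/frag_len to the hardware descriptor...
 *   ...on tx completion:
 *   qdf_nbuf_tso_unmap_frag(osdev, paddr, frag_len, QDF_DMA_TO_DEVICE);
 *
 * Each successful map should be balanced by exactly one unmap with the same
 * length and direction, otherwise the CONFIG_WLAN_SYSFS_MEM_STATS byte
 * accounting done through qdf_record_nbuf_nbytes() drifts.
 */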
4107
4108 /**
4109 * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
4110 * information
4111 * @osdev: qdf device handle
4112 * @skb: skb buffer
4113 * @tso_info: Parameters common to all segments
4114 *
4115 * Get the TSO information that is common across all the TCP
4116 * segments of the jumbo packet
4117 *
4118 * Return: 0 - success 1 - failure
4119 */
4120 static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
4121 struct sk_buff *skb,
4122 struct qdf_tso_cmn_seg_info_t *tso_info)
4123 {
4124 /* Get ethernet type and ethernet header length */
4125 tso_info->ethproto = vlan_get_protocol(skb);
4126
4127 /* Determine whether this is an IPv4 or IPv6 packet */
4128 if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
4129 /* for IPv4, get the IP ID and enable TCP and IP csum */
4130 struct iphdr *ipv4_hdr = ip_hdr(skb);
4131
4132 tso_info->ip_id = ntohs(ipv4_hdr->id);
4133 tso_info->ipv4_csum_en = 1;
4134 tso_info->tcp_ipv4_csum_en = 1;
4135 if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
4136 qdf_err("TSO IPV4 proto 0x%x not TCP",
4137 ipv4_hdr->protocol);
4138 return 1;
4139 }
4140 } else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
4141 /* for IPv6, enable TCP csum. No IP ID or IP csum */
4142 tso_info->tcp_ipv6_csum_en = 1;
4143 } else {
4144 qdf_err("TSO: ethertype 0x%x is not supported!",
4145 tso_info->ethproto);
4146 return 1;
4147 }
4148 tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
4149 tso_info->tcphdr = tcp_hdr(skb);
4150 tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
4151 /* get pointer to the ethernet + IP + TCP header and their length */
4152 tso_info->eit_hdr = skb->data;
4153 tso_info->eit_hdr_len = (skb_transport_header(skb)
4154 - skb_mac_header(skb)) + tcp_hdrlen(skb);
4155 tso_info->eit_hdr_dma_map_addr = qdf_nbuf_tso_map_frag(
4156 osdev, tso_info->eit_hdr,
4157 tso_info->eit_hdr_len,
4158 QDF_DMA_TO_DEVICE);
4159 if (qdf_unlikely(!tso_info->eit_hdr_dma_map_addr))
4160 return 1;
4161
4162 if (tso_info->ethproto == htons(ETH_P_IP)) {
4163 /* include IPv4 header length for IPV4 (total length) */
4164 tso_info->ip_tcp_hdr_len =
4165 tso_info->eit_hdr_len - tso_info->l2_len;
4166 } else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
4167 /* exclude IPv6 header length for IPv6 (payload length) */
4168 tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
4169 }
4170 /*
4171 * The length of the payload (application layer data) is added to
4172 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
4173 * descriptor.
4174 */
4175
4176 TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u skb len %u\n", __func__,
4177 tso_info->tcp_seq_num,
4178 tso_info->eit_hdr_len,
4179 tso_info->l2_len,
4180 skb->len);
4181 return 0;
4182 }
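/*
 * Worked example for the ip_tcp_hdr_len computation above, assuming plain
 * Ethernet framing with no VLAN tag and minimal headers:
 *
 *   IPv4: l2_len = 14, eit_hdr_len = 14 + 20 + 20 = 54
 *         ip_tcp_hdr_len = eit_hdr_len - l2_len = 40
 *         (matches the IPv4 "total length" convention: IP + TCP headers)
 *
 *   IPv6: ip_tcp_hdr_len = tcp_hdrlen(skb) = 20
 *         (matches the IPv6 "payload length" convention: the fixed 40-byte
 *         IPv6 header is excluded)
 */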
4183
4184
4185 /**
4186 * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
4187 *
4188 * @curr_seg: Segment whose contents are initialized
4189 * @tso_cmn_info: Parameters common to all segments
4190 *
4191 * Return: None
4192 */
4193 static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
4194 struct qdf_tso_seg_elem_t *curr_seg,
4195 struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
4196 {
4197 /* Initialize the flags to 0 */
4198 memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
4199
4200 /*
4201 * The following fields remain the same across all segments of
4202 * a jumbo packet
4203 */
4204 curr_seg->seg.tso_flags.tso_enable = 1;
4205 curr_seg->seg.tso_flags.ipv4_checksum_en =
4206 tso_cmn_info->ipv4_csum_en;
4207 curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
4208 tso_cmn_info->tcp_ipv6_csum_en;
4209 curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
4210 tso_cmn_info->tcp_ipv4_csum_en;
4211 curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
4212
4213 /* The following fields change for the segments */
4214 curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
4215 tso_cmn_info->ip_id++;
4216
4217 curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
4218 curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
4219 curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
4220 curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
4221 curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
4222 curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
4223
4224 curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
4225
4226 /*
4227 * First fragment for each segment always contains the ethernet,
4228 * IP and TCP header
4229 */
4230 curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
4231 curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
4232 curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
4233 curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
4234
4235 TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
4236 __func__, __LINE__, tso_cmn_info->eit_hdr,
4237 tso_cmn_info->eit_hdr_len,
4238 curr_seg->seg.tso_flags.tcp_seq_num,
4239 curr_seg->seg.total_len);
4240 qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
4241 }
4242
4243 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
4244 struct qdf_tso_info_t *tso_info)
4245 {
4246 /* common across all segments */
4247 struct qdf_tso_cmn_seg_info_t tso_cmn_info;
4248 /* segment specific */
4249 void *tso_frag_vaddr;
4250 qdf_dma_addr_t tso_frag_paddr = 0;
4251 uint32_t num_seg = 0;
4252 struct qdf_tso_seg_elem_t *curr_seg;
4253 struct qdf_tso_num_seg_elem_t *total_num_seg;
4254 skb_frag_t *frag = NULL;
4255 uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
4256 uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/
4257 uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
4258 uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
4259 int j = 0; /* skb fragment index */
4260 uint8_t byte_8_align_offset;
4261
4262 memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
4263 total_num_seg = tso_info->tso_num_seg_list;
4264 curr_seg = tso_info->tso_seg_list;
4265 total_num_seg->num_seg.tso_cmn_num_seg = 0;
4266
4267 byte_8_align_offset = qdf_nbuf_adj_tso_frag(skb);
4268
4269 if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
4270 skb, &tso_cmn_info))) {
4271 qdf_warn("TSO: error getting common segment info");
4272 return 0;
4273 }
4274
4275 /* length of the first chunk of data in the skb */
4276 skb_frag_len = skb_headlen(skb);
4277
4278 /* the 0th tso segment's 0th fragment always contains the EIT header */
4279 /* update the remaining skb fragment length and TSO segment length */
4280 skb_frag_len -= tso_cmn_info.eit_hdr_len;
4281 skb_proc -= tso_cmn_info.eit_hdr_len;
4282
4283 /* get the address to the next tso fragment */
4284 tso_frag_vaddr = skb->data +
4285 tso_cmn_info.eit_hdr_len +
4286 byte_8_align_offset;
4287 /* get the length of the next tso fragment */
4288 tso_frag_len = min(skb_frag_len, tso_seg_size);
4289
4290 if (tso_frag_len != 0) {
4291 tso_frag_paddr = qdf_nbuf_tso_map_frag(
4292 osdev, tso_frag_vaddr, tso_frag_len,
4293 QDF_DMA_TO_DEVICE);
4294 if (qdf_unlikely(!tso_frag_paddr))
4295 return 0;
4296 }
4297
4298 TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
4299 __LINE__, skb_frag_len, tso_frag_len);
4300 num_seg = tso_info->num_segs;
4301 tso_info->num_segs = 0;
4302 tso_info->is_tso = 1;
4303
4304 while (num_seg && curr_seg) {
4305 int i = 1; /* tso fragment index */
4306 uint8_t more_tso_frags = 1;
4307
4308 curr_seg->seg.num_frags = 0;
4309 tso_info->num_segs++;
4310 total_num_seg->num_seg.tso_cmn_num_seg++;
4311
4312 __qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
4313 &tso_cmn_info);
4314
4315 /* If TCP PSH flag is set, set it in the last or only segment */
4316 if (num_seg == 1)
4317 curr_seg->seg.tso_flags.psh = tso_cmn_info.tcphdr->psh;
4318
4319 if (unlikely(skb_proc == 0))
4320 return tso_info->num_segs;
4321
4322 curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
4323 curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
4324 /* frag len is added to ip_len in while loop below*/
4325
4326 curr_seg->seg.num_frags++;
4327
4328 while (more_tso_frags) {
4329 if (tso_frag_len != 0) {
4330 curr_seg->seg.tso_frags[i].vaddr =
4331 tso_frag_vaddr;
4332 curr_seg->seg.tso_frags[i].length =
4333 tso_frag_len;
4334 curr_seg->seg.total_len += tso_frag_len;
4335 curr_seg->seg.tso_flags.ip_len += tso_frag_len;
4336 curr_seg->seg.num_frags++;
4337 skb_proc = skb_proc - tso_frag_len;
4338
4339 /* increment the TCP sequence number */
4340
4341 tso_cmn_info.tcp_seq_num += tso_frag_len;
4342 curr_seg->seg.tso_frags[i].paddr =
4343 tso_frag_paddr;
4344
4345 qdf_assert_always(curr_seg->seg.tso_frags[i].paddr);
4346 }
4347
4348 TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
4349 __func__, __LINE__,
4350 i,
4351 tso_frag_len,
4352 curr_seg->seg.total_len,
4353 curr_seg->seg.tso_frags[i].vaddr);
4354
4355 /* if there is no more data left in the skb */
4356 if (!skb_proc)
4357 return tso_info->num_segs;
4358
4359 /* get the next payload fragment information */
4360 /* check if there are more fragments in this segment */
4361 if (tso_frag_len < tso_seg_size) {
4362 more_tso_frags = 1;
4363 if (tso_frag_len != 0) {
4364 tso_seg_size = tso_seg_size -
4365 tso_frag_len;
4366 i++;
4367 if (curr_seg->seg.num_frags ==
4368 FRAG_NUM_MAX) {
4369 more_tso_frags = 0;
4370 /*
4371 * reset i and the tso
4372 * payload size
4373 */
4374 i = 1;
4375 tso_seg_size =
4376 skb_shinfo(skb)->
4377 gso_size;
4378 }
4379 }
4380 } else {
4381 more_tso_frags = 0;
4382 /* reset i and the tso payload size */
4383 i = 1;
4384 tso_seg_size = skb_shinfo(skb)->gso_size;
4385 }
4386
4387 /* if the next fragment is contiguous */
4388 if ((tso_frag_len != 0) && (tso_frag_len < skb_frag_len)) {
4389 tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
4390 skb_frag_len = skb_frag_len - tso_frag_len;
4391 tso_frag_len = min(skb_frag_len, tso_seg_size);
4392
4393 } else { /* the next fragment is not contiguous */
4394 if (skb_shinfo(skb)->nr_frags == 0) {
4395 qdf_info("TSO: nr_frags == 0!");
4396 qdf_assert(0);
4397 return 0;
4398 }
4399 if (j >= skb_shinfo(skb)->nr_frags) {
4400 qdf_info("TSO: nr_frags %d j %d",
4401 skb_shinfo(skb)->nr_frags, j);
4402 qdf_assert(0);
4403 return 0;
4404 }
4405 frag = &skb_shinfo(skb)->frags[j];
4406 skb_frag_len = skb_frag_size(frag);
4407 tso_frag_len = min(skb_frag_len, tso_seg_size);
4408 tso_frag_vaddr = skb_frag_address_safe(frag);
4409 j++;
4410 }
4411
4412 TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
4413 __func__, __LINE__, skb_frag_len, tso_frag_len,
4414 tso_seg_size);
4415
4416 if (!(tso_frag_vaddr)) {
4417 TSO_DEBUG("%s: Fragment virtual addr is NULL",
4418 __func__);
4419 return 0;
4420 }
4421
4422 tso_frag_paddr = qdf_nbuf_tso_map_frag(
4423 osdev, tso_frag_vaddr,
4424 tso_frag_len,
4425 QDF_DMA_TO_DEVICE);
4426 if (qdf_unlikely(!tso_frag_paddr))
4427 return 0;
4428 }
4429 TSO_DEBUG("%s tcp_seq_num: %u", __func__,
4430 curr_seg->seg.tso_flags.tcp_seq_num);
4431 num_seg--;
4432 /* if TCP FIN flag was set, set it in the last segment */
4433 if (!num_seg)
4434 curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
4435
4436 qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
4437 curr_seg = curr_seg->next;
4438 }
4439 return tso_info->num_segs;
4440 }
4441 qdf_export_symbol(__qdf_nbuf_get_tso_info);
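/*
 * Minimal usage sketch for __qdf_nbuf_get_tso_info() (illustrative only; the
 * segment and num-seg elements are normally drawn from the datapath's TSO
 * descriptor pools, which live outside this file):
 *
 *   struct qdf_tso_info_t tso_info = {0};
 *
 *   tso_info.tso_seg_list = <chain of qdf_tso_seg_elem_t>;
 *   tso_info.tso_num_seg_list = <one qdf_tso_num_seg_elem_t>;
 *   tso_info.num_segs = __qdf_nbuf_get_tso_num_seg(skb);
 *
 *   if (!__qdf_nbuf_get_tso_info(osdev, skb, &tso_info))
 *           <treat as failure: unmap any mapped segments and drop>;
 *
 * On success the return value (and tso_info.num_segs) is the number of
 * segments actually filled; each segment holds at most FRAG_NUM_MAX
 * fragments, with fragment 0 always pointing at the shared EIT header.
 */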
4442
4443 void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
4444 struct qdf_tso_seg_elem_t *tso_seg,
4445 bool is_last_seg)
4446 {
4447 uint32_t num_frags = 0;
4448
4449 if (tso_seg->seg.num_frags > 0)
4450 num_frags = tso_seg->seg.num_frags - 1;
4451
4452 /* The number of frags in a TSO segment cannot be less than 2 */
4453 if (num_frags < 1) {
4454 /*
4455 * If a TSO segment has only one frag but is_last_seg is true
4456 * (this can happen when qdf_nbuf_get_tso_info failed),
4457 * do the DMA unmap for the 0th frag in this segment.
4458 */
4459 if (is_last_seg && tso_seg->seg.num_frags == 1)
4460 goto last_seg_free_first_frag;
4461
4462 qdf_assert(0);
4463 qdf_err("ERROR: num of frags in a tso segment is %d",
4464 (num_frags + 1));
4465 return;
4466 }
4467
4468 while (num_frags) {
4469 /* DMA unmap the TSO segment frags, except the 0th frag */
4470 if (0 == tso_seg->seg.tso_frags[num_frags].paddr) {
4471 qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
4472 num_frags);
4473 qdf_assert(0);
4474 return;
4475 }
4476 qdf_nbuf_tso_unmap_frag(
4477 osdev,
4478 tso_seg->seg.tso_frags[num_frags].paddr,
4479 tso_seg->seg.tso_frags[num_frags].length,
4480 QDF_DMA_TO_DEVICE);
4481 tso_seg->seg.tso_frags[num_frags].paddr = 0;
4482 num_frags--;
4483 qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
4484 }
4485
4486 last_seg_free_first_frag:
4487 if (is_last_seg) {
4488 /* DMA unmap the 0th frag of the TSO segment */
4489 if (0 == tso_seg->seg.tso_frags[0].paddr) {
4490 qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
4491 qdf_assert(0);
4492 return;
4493 }
4494 qdf_nbuf_tso_unmap_frag(osdev,
4495 tso_seg->seg.tso_frags[0].paddr,
4496 tso_seg->seg.tso_frags[0].length,
4497 QDF_DMA_TO_DEVICE);
4498 tso_seg->seg.tso_frags[0].paddr = 0;
4499 qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
4500 }
4501 }
4502 qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
4503
4504 size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
4505 {
4506 size_t packet_len;
4507
4508 packet_len = skb->len -
4509 ((skb_transport_header(skb) - skb_mac_header(skb)) +
4510 tcp_hdrlen(skb));
4511
4512 return packet_len;
4513 }
4514
4515 qdf_export_symbol(__qdf_nbuf_get_tcp_payload_len);
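/*
 * Worked example for the payload-length computation above: for a full-size
 * Ethernet frame with minimal IPv4/TCP headers,
 *
 *   skb->len = 1514
 *   (transport_header - mac_header) + tcp_hdrlen = (14 + 20) + 20 = 54
 *   TCP payload length = 1514 - 54 = 1460
 */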
4516
4517 #ifndef BUILD_X86
4518 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
4519 {
4520 uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
4521 uint32_t remainder, num_segs = 0;
4522 uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
4523 uint8_t frags_per_tso = 0;
4524 uint32_t skb_frag_len = 0;
4525 uint32_t eit_hdr_len = (skb_transport_header(skb)
4526 - skb_mac_header(skb)) + tcp_hdrlen(skb);
4527 skb_frag_t *frag = NULL;
4528 int j = 0;
4529 uint32_t temp_num_seg = 0;
4530
4531 /* length of the first chunk of data in the skb minus eit header*/
4532 skb_frag_len = skb_headlen(skb) - eit_hdr_len;
4533
4534 /* Calculate num of segs for skb's first chunk of data*/
4535 remainder = skb_frag_len % tso_seg_size;
4536 num_segs = skb_frag_len / tso_seg_size;
4537 /*
4538 * Remainder non-zero and nr_frags zero implies end of skb data.
4539 * In that case, one more tso seg is required to accommodate
4540 * remaining data, hence num_segs++. If nr_frags is non-zero,
4541 * then remaining data will be accommodated while doing the calculation
4542 * for nr_frags data. Hence, frags_per_tso++.
4543 */
4544 if (remainder) {
4545 if (!skb_nr_frags)
4546 num_segs++;
4547 else
4548 frags_per_tso++;
4549 }
4550
4551 while (skb_nr_frags) {
4552 if (j >= skb_shinfo(skb)->nr_frags) {
4553 qdf_info("TSO: nr_frags %d j %d",
4554 skb_shinfo(skb)->nr_frags, j);
4555 qdf_assert(0);
4556 return 0;
4557 }
4558 /*
4559 * Calculate the number of TSO segments for the nr_frags data:
4560 * get the length of each frag into skb_frag_len and add it to
4561 * the remainder. Get the number of segments by dividing it by
4562 * tso_seg_size and compute the new remainder.
4563 * Decrement the nr_frags value and keep looping over all the
4564 * skb fragments.
4565 */
4566 frag = &skb_shinfo(skb)->frags[j];
4567 skb_frag_len = skb_frag_size(frag);
4568 temp_num_seg = num_segs;
4569 remainder += skb_frag_len;
4570 num_segs += remainder / tso_seg_size;
4571 remainder = remainder % tso_seg_size;
4572 skb_nr_frags--;
4573 if (remainder) {
4574 if (num_segs > temp_num_seg)
4575 frags_per_tso = 0;
4576 /*
4577 * Increment frags_per_tso whenever the remainder is
4578 * positive. If frags_per_tso reaches (max - 1)
4579 * (the first frag always carries the EIT header, hence max - 1),
4580 * increment num_segs since no more data can be
4581 * accommodated in the current TSO segment. Reset the remainder
4582 * and frags_per_tso and keep looping.
4583 */
4584 frags_per_tso++;
4585 if (frags_per_tso == FRAG_NUM_MAX - 1) {
4586 num_segs++;
4587 frags_per_tso = 0;
4588 remainder = 0;
4589 }
4590 /*
4591 * If this is the last skb frag and the remainder is still
4592 * non-zero (frags_per_tso has not reached max - 1),
4593 * increment num_segs to take care of the
4594 * remaining length.
4595 */
4596 if (!skb_nr_frags && remainder) {
4597 num_segs++;
4598 frags_per_tso = 0;
4599 }
4600 } else {
4601 /* Whenever remainder is 0, reset the frags_per_tso. */
4602 frags_per_tso = 0;
4603 }
4604 j++;
4605 }
4606
4607 return num_segs;
4608 }
4609 #elif !defined(QCA_WIFI_QCN9000)
4610 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
4611 {
4612 uint32_t i, gso_size, tmp_len, num_segs = 0;
4613 skb_frag_t *frag = NULL;
4614
4615 /*
4616 * Check if the head SKB or any of the frags are allocated below the
4617 * 0x50000000 region, which cannot be accessed by the target
4618 */
4619 if (virt_to_phys(skb->data) < 0x50000040) {
4620 TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
4621 __func__, __LINE__, skb_shinfo(skb)->nr_frags,
4622 virt_to_phys(skb->data));
4623 goto fail;
4624
4625 }
4626
4627 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4628 frag = &skb_shinfo(skb)->frags[i];
4629
4630 if (!frag)
4631 goto fail;
4632
4633 if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
4634 goto fail;
4635 }
4636
4637
4638 gso_size = skb_shinfo(skb)->gso_size;
4639 tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
4640 + tcp_hdrlen(skb));
4641 while (tmp_len) {
4642 num_segs++;
4643 if (tmp_len > gso_size)
4644 tmp_len -= gso_size;
4645 else
4646 break;
4647 }
4648
4649 return num_segs;
4650
4651 /*
4652 * Do not free this frame, just do socket level accounting
4653 * so that this is not reused.
4654 */
4655 fail:
4656 if (skb->sk)
4657 atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
4658
4659 return 0;
4660 }
4661 #else
4662 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
4663 {
4664 uint32_t i, gso_size, tmp_len, num_segs = 0;
4665 skb_frag_t *frag = NULL;
4666
4667 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4668 frag = &skb_shinfo(skb)->frags[i];
4669
4670 if (!frag)
4671 goto fail;
4672 }
4673
4674 gso_size = skb_shinfo(skb)->gso_size;
4675 tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
4676 + tcp_hdrlen(skb));
4677 while (tmp_len) {
4678 num_segs++;
4679 if (tmp_len > gso_size)
4680 tmp_len -= gso_size;
4681 else
4682 break;
4683 }
4684
4685 return num_segs;
4686
4687 /*
4688 * Do not free this frame, just do socket level accounting
4689 * so that this is not reused.
4690 */
4691 fail:
4692 if (skb->sk)
4693 atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
4694
4695 return 0;
4696 }
4697 #endif
4698 qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
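/*
 * Worked example for the non-BUILD_X86 segment count above: a linear skb
 * (nr_frags = 0) with skb_headlen = 1514, eit_hdr_len = 54 and
 * gso_size = 1400 gives
 *
 *   skb_frag_len = 1514 - 54 = 1460
 *   num_segs     = 1460 / 1400 = 1, remainder = 60
 *
 * A non-zero remainder with no page frags adds one more segment, so
 * __qdf_nbuf_get_tso_num_seg() returns 2: one full 1400-byte segment plus a
 * 60-byte tail segment.
 */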
4699
4700 #endif /* FEATURE_TSO */
4701
4702 void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
4703 uint32_t *lo, uint32_t *hi)
4704 {
4705 if (sizeof(dmaaddr) > sizeof(uint32_t)) {
4706 *lo = lower_32_bits(dmaaddr);
4707 *hi = upper_32_bits(dmaaddr);
4708 } else {
4709 *lo = dmaaddr;
4710 *hi = 0;
4711 }
4712 }
4713
4714 qdf_export_symbol(__qdf_dmaaddr_to_32s);
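/*
 * Example: on a platform with a 64-bit qdf_dma_addr_t,
 * __qdf_dmaaddr_to_32s(0x123456780ULL, &lo, &hi) yields lo = 0x23456780 and
 * hi = 0x1; with a 32-bit qdf_dma_addr_t the whole address is returned in lo
 * and hi is forced to 0.
 */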
4715
4716 struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
4717 {
4718 qdf_nbuf_users_inc(&skb->users);
4719 return skb;
4720 }
4721 qdf_export_symbol(__qdf_nbuf_inc_users);
4722
4723 int __qdf_nbuf_get_users(struct sk_buff *skb)
4724 {
4725 return qdf_nbuf_users_read(&skb->users);
4726 }
4727 qdf_export_symbol(__qdf_nbuf_get_users);
4728
4729 void __qdf_nbuf_ref(struct sk_buff *skb)
4730 {
4731 skb_get(skb);
4732 }
4733 qdf_export_symbol(__qdf_nbuf_ref);
4734
4735 int __qdf_nbuf_shared(struct sk_buff *skb)
4736 {
4737 return skb_shared(skb);
4738 }
4739 qdf_export_symbol(__qdf_nbuf_shared);
4740
4741 QDF_STATUS
4742 __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
4743 {
4744 QDF_STATUS error = QDF_STATUS_SUCCESS;
4745 /*
4746 * The driver can advertise its scatter/gather capability; it must be
4747 * handled here, including bounce buffers if they are present
4748 */
4749 (*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
4750 if (!(*dmap))
4751 error = QDF_STATUS_E_NOMEM;
4752
4753 return error;
4754 }
4755 qdf_export_symbol(__qdf_nbuf_dmamap_create);
4756
4757 void
4758 __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
4759 {
4760 kfree(dmap);
4761 }
4762 qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
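/*
 * Typical pairing of the two calls above (illustrative sketch only):
 *
 *   __qdf_dma_map_t dmap;
 *
 *   if (__qdf_nbuf_dmamap_create(osdev, &dmap) != QDF_STATUS_SUCCESS)
 *           return QDF_STATUS_E_NOMEM;
 *   ...use dmap with the nbuf map/unmap helpers...
 *   __qdf_nbuf_dmamap_destroy(osdev, dmap);
 */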
4763
4764 #ifdef QDF_OS_DEBUG
4765 QDF_STATUS
4766 __qdf_nbuf_map_nbytes(
4767 qdf_device_t osdev,
4768 struct sk_buff *skb,
4769 qdf_dma_dir_t dir,
4770 int nbytes)
4771 {
4772 struct skb_shared_info *sh = skb_shinfo(skb);
4773
4774 qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4775
4776 /*
4777 * Assume there's only a single fragment.
4778 * To support multiple fragments, it would be necessary to change
4779 * adf_nbuf_t to be a separate object that stores meta-info
4780 * (including the bus address for each fragment) and a pointer
4781 * to the underlying sk_buff.
4782 */
4783 qdf_assert(sh->nr_frags == 0);
4784
4785 return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
4786 }
4787 qdf_export_symbol(__qdf_nbuf_map_nbytes);
4788 #else
4789 QDF_STATUS
4790 __qdf_nbuf_map_nbytes(
4791 qdf_device_t osdev,
4792 struct sk_buff *skb,
4793 qdf_dma_dir_t dir,
4794 int nbytes)
4795 {
4796 return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
4797 }
4798 qdf_export_symbol(__qdf_nbuf_map_nbytes);
4799 #endif
4800 void
4801 __qdf_nbuf_unmap_nbytes(
4802 qdf_device_t osdev,
4803 struct sk_buff *skb,
4804 qdf_dma_dir_t dir,
4805 int nbytes)
4806 {
4807 qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4808
4809 /*
4810 * Assume there's a single fragment.
4811 * If this is not true, the assertion in __adf_nbuf_map will catch it.
4812 */
4813 __qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
4814 }
4815 qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
4816
4817 void
4818 __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
4819 {
4820 qdf_assert(bmap->mapped);
4821 qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
4822
4823 memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
4824 sizeof(struct __qdf_segment));
4825 sg->nsegs = bmap->nsegs;
4826 }
4827 qdf_export_symbol(__qdf_nbuf_dma_map_info);
4828
4829 #if defined(__QDF_SUPPORT_FRAG_MEM)
4830 void
4831 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg)
4832 {
4833 struct skb_shared_info *sh = skb_shinfo(skb);
4834 uint32_t i;
4835 
4836 qdf_assert(skb);
4837 sg->sg_segs[0].vaddr = skb->data;
4838 sg->sg_segs[0].len = skb->len;
4839 sg->nsegs = 1;
4840 for (i = 1; i <= sh->nr_frags; i++) {
4841 skb_frag_t *f = &sh->frags[i - 1];
4842 
4843 qdf_assert(i < QDF_MAX_SGLIST);
4844 sg->sg_segs[i].vaddr = (uint8_t *)skb_frag_address(f);
4845 sg->sg_segs[i].len = skb_frag_size(f);
4846 }
4847 sg->nsegs += sh->nr_frags;
4848
4849 }
4850 qdf_export_symbol(__qdf_nbuf_frag_info);
4851 #else
4852 #ifdef QDF_OS_DEBUG
4853 void
4854 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg)
4855 {
4856
4857 struct skb_shared_info *sh = skb_shinfo(skb);
4858
4859 qdf_assert(skb);
4860 sg->sg_segs[0].vaddr = skb->data;
4861 sg->sg_segs[0].len = skb->len;
4862 sg->nsegs = 1;
4863
4864 qdf_assert(sh->nr_frags == 0);
4865 }
4866 qdf_export_symbol(__qdf_nbuf_frag_info);
4867 #else
4868 void
4869 __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg)
4870 {
4871 sg->sg_segs[0].vaddr = skb->data;
4872 sg->sg_segs[0].len = skb->len;
4873 sg->nsegs = 1;
4874 }
4875 qdf_export_symbol(__qdf_nbuf_frag_info);
4876 #endif
4877 #endif
4878 uint32_t
4879 __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
4880 {
4881 struct skb_shared_info *sh = skb_shinfo(nbuf);
4882 const skb_frag_t *frag = sh->frags + cur_frag;
4883
4884 return skb_frag_size(frag);
4885 }
4886 qdf_export_symbol(__qdf_nbuf_get_frag_size);
4887
4888 #ifdef A_SIMOS_DEVHOST
4889 QDF_STATUS __qdf_nbuf_frag_map(
4890 qdf_device_t osdev, __qdf_nbuf_t nbuf,
4891 int offset, qdf_dma_dir_t dir, int cur_frag)
4892 {
4893 int32_t paddr, frag_len;
4894
4895 QDF_NBUF_CB_PADDR(nbuf) = paddr = nbuf->data;
4896 return QDF_STATUS_SUCCESS;
4897 }
4898 qdf_export_symbol(__qdf_nbuf_frag_map);
4899 #else
4900 QDF_STATUS __qdf_nbuf_frag_map(
4901 qdf_device_t osdev, __qdf_nbuf_t nbuf,
4902 int offset, qdf_dma_dir_t dir, int cur_frag)
4903 {
4904 dma_addr_t paddr, frag_len;
4905 struct skb_shared_info *sh = skb_shinfo(nbuf);
4906 const skb_frag_t *frag = sh->frags + cur_frag;
4907
4908 frag_len = skb_frag_size(frag);
4909
4910 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
4911 skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
4912 __qdf_dma_dir_to_os(dir));
4913 return dma_mapping_error(osdev->dev, paddr) ?
4914 QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
4915 }
4916 qdf_export_symbol(__qdf_nbuf_frag_map);
4917 #endif
4918 void
4919 __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
4920 {
4921 return;
4922 }
4923 qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
4924
4925 /**
4926 * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
4927 * @osdev: os device
4928 * @buf: sk buff
4929 * @dir: direction
4930 *
4931 * Return: none
4932 */
4933 #if defined(A_SIMOS_DEVHOST)
4934 static void __qdf_nbuf_sync_single_for_cpu(
4935 qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
4936 {
4937 return;
4938 }
4939 #else
4940 static void __qdf_nbuf_sync_single_for_cpu(
4941 qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
4942 {
4943 if (0 == QDF_NBUF_CB_PADDR(buf)) {
4944 qdf_err("ERROR: NBUF mapped physical address is NULL");
4945 return;
4946 }
4947 dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
4948 skb_end_offset(buf) - skb_headroom(buf),
4949 __qdf_dma_dir_to_os(dir));
4950 }
4951 #endif
4952
4953 void
4954 __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
4955 struct sk_buff *skb, qdf_dma_dir_t dir)
4956 {
4957 qdf_assert(
4958 (dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4959
4960 /*
4961 * Assume there's a single fragment.
4962 * If this is not true, the assertion in __adf_nbuf_map will catch it.
4963 */
4964 __qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
4965 }
4966 qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
4967
4968 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
4969 /**
4970 * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
4971 * @rx_status: Pointer to rx_status.
4972 * @rtap_buf: Buf to which VHT info has to be updated.
4973 * @rtap_len: Current length of radiotap buffer
4974 *
4975 * Return: Length of radiotap after VHT flags updated.
4976 */
4977 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4978 struct mon_rx_status *rx_status,
4979 int8_t *rtap_buf,
4980 uint32_t rtap_len)
4981 {
4982 uint16_t vht_flags = 0;
4983 struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
4984
4985 rtap_len = qdf_align(rtap_len, 2);
4986
4987 /* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4988 vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
4989 IEEE80211_RADIOTAP_VHT_KNOWN_GI |
4990 IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
4991 IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
4992 IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
4993 IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
4994 put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
4995 rtap_len += 2;
4996
4997 rtap_buf[rtap_len] |=
4998 (rx_status->is_stbc ?
4999 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
5000 (rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
5001 (rx_status->ldpc ?
5002 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
5003 (rx_status->beamformed ?
5004 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
5005 rtap_len += 1;
5006
5007 if (!rx_user_status) {
5008 switch (rx_status->vht_flag_values2) {
5009 case IEEE80211_RADIOTAP_VHT_BW_20:
5010 rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
5011 break;
5012 case IEEE80211_RADIOTAP_VHT_BW_40:
5013 rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
5014 break;
5015 case IEEE80211_RADIOTAP_VHT_BW_80:
5016 rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
5017 break;
5018 case IEEE80211_RADIOTAP_VHT_BW_160:
5019 rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
5020 break;
5021 }
5022 rtap_len += 1;
5023 rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
5024 rtap_len += 1;
5025 rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
5026 rtap_len += 1;
5027 rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
5028 rtap_len += 1;
5029 rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
5030 rtap_len += 1;
5031 rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
5032 rtap_len += 1;
5033 rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
5034 rtap_len += 1;
5035 put_unaligned_le16(rx_status->vht_flag_values6,
5036 &rtap_buf[rtap_len]);
5037 rtap_len += 2;
5038 } else {
5039 switch (rx_user_status->vht_flag_values2) {
5040 case IEEE80211_RADIOTAP_VHT_BW_20:
5041 rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
5042 break;
5043 case IEEE80211_RADIOTAP_VHT_BW_40:
5044 rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
5045 break;
5046 case IEEE80211_RADIOTAP_VHT_BW_80:
5047 rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
5048 break;
5049 case IEEE80211_RADIOTAP_VHT_BW_160:
5050 rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
5051 break;
5052 }
5053 rtap_len += 1;
5054 rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[0]);
5055 rtap_len += 1;
5056 rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[1]);
5057 rtap_len += 1;
5058 rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[2]);
5059 rtap_len += 1;
5060 rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[3]);
5061 rtap_len += 1;
5062 rtap_buf[rtap_len] = (rx_user_status->vht_flag_values4);
5063 rtap_len += 1;
5064 rtap_buf[rtap_len] = (rx_user_status->vht_flag_values5);
5065 rtap_len += 1;
5066 put_unaligned_le16(rx_user_status->vht_flag_values6,
5067 &rtap_buf[rtap_len]);
5068 rtap_len += 2;
5069 }
5070
5071 return rtap_len;
5072 }
5073
5074 /**
5075 * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header HE fields from rx_status
5076 * @rx_status: Pointer to rx_status.
5077 * @rtap_buf: buffer to which radiotap has to be updated
5078 * @rtap_len: radiotap length
5079 *
5080 * API to update the high-efficiency (11ax) fields in the radiotap header
5081 *
5082 * Return: length of rtap_len updated.
5083 */
5084 static unsigned int
5085 qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
5086 int8_t *rtap_buf, uint32_t rtap_len)
5087 {
5088 /*
5089 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
5090 * Enable all "known" HE radiotap flags for now
5091 */
5092 struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5093
5094 rtap_len = qdf_align(rtap_len, 2);
5095
5096 if (!rx_user_status) {
5097 put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
5098 rtap_len += 2;
5099
5100 put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
5101 rtap_len += 2;
5102
5103 put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
5104 rtap_len += 2;
5105
5106 put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
5107 rtap_len += 2;
5108
5109 put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
5110 rtap_len += 2;
5111
5112 put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
5113 rtap_len += 2;
5114 } else {
5115 put_unaligned_le16(rx_user_status->he_data1 |
5116 rx_status->he_data1, &rtap_buf[rtap_len]);
5117 rtap_len += 2;
5118
5119 put_unaligned_le16(rx_user_status->he_data2 |
5120 rx_status->he_data2, &rtap_buf[rtap_len]);
5121 rtap_len += 2;
5122
5123 put_unaligned_le16(rx_user_status->he_data3 |
5124 rx_status->he_data3, &rtap_buf[rtap_len]);
5125 rtap_len += 2;
5126
5127 put_unaligned_le16(rx_user_status->he_data4 |
5128 rx_status->he_data4, &rtap_buf[rtap_len]);
5129 rtap_len += 2;
5130
5131 put_unaligned_le16(rx_user_status->he_data5 |
5132 rx_status->he_data5, &rtap_buf[rtap_len]);
5133 rtap_len += 2;
5134
5135 put_unaligned_le16(rx_user_status->he_data6 |
5136 rx_status->he_data6, &rtap_buf[rtap_len]);
5137 rtap_len += 2;
5138 }
5139
5140 return rtap_len;
5141 }
5142
5143
5144 /**
5145 * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
5146 * @rx_status: Pointer to rx_status.
5147 * @rtap_buf: buffer to which radiotap has to be updated
5148 * @rtap_len: radiotap length
5149 *
5150 * API to update the HE-MU fields in the radiotap header
5151 *
5152 * Return: length of rtap_len updated.
5153 */
5154 static unsigned int
5155 qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
5156 int8_t *rtap_buf, uint32_t rtap_len)
5157 {
5158 struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5159
5160 rtap_len = qdf_align(rtap_len, 2);
5161
5162 /*
5163 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
5164 * Enable all "known" he-mu radiotap flags for now
5165 */
5166
5167 if (!rx_user_status) {
5168 put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
5169 rtap_len += 2;
5170
5171 put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
5172 rtap_len += 2;
5173
5174 rtap_buf[rtap_len] = rx_status->he_RU[0];
5175 rtap_len += 1;
5176
5177 rtap_buf[rtap_len] = rx_status->he_RU[1];
5178 rtap_len += 1;
5179
5180 rtap_buf[rtap_len] = rx_status->he_RU[2];
5181 rtap_len += 1;
5182
5183 rtap_buf[rtap_len] = rx_status->he_RU[3];
5184 rtap_len += 1;
5185 } else {
5186 put_unaligned_le16(rx_user_status->he_flags1,
5187 &rtap_buf[rtap_len]);
5188 rtap_len += 2;
5189
5190 put_unaligned_le16(rx_user_status->he_flags2,
5191 &rtap_buf[rtap_len]);
5192 rtap_len += 2;
5193
5194 rtap_buf[rtap_len] = rx_user_status->he_RU[0];
5195 rtap_len += 1;
5196
5197 rtap_buf[rtap_len] = rx_user_status->he_RU[1];
5198 rtap_len += 1;
5199
5200 rtap_buf[rtap_len] = rx_user_status->he_RU[2];
5201 rtap_len += 1;
5202
5203 rtap_buf[rtap_len] = rx_user_status->he_RU[3];
5204 rtap_len += 1;
5205 qdf_debug("he_flags %x %x he-RU %x %x %x %x",
5206 rx_user_status->he_flags1,
5207 rx_user_status->he_flags2, rx_user_status->he_RU[0],
5208 rx_user_status->he_RU[1], rx_user_status->he_RU[2],
5209 rx_user_status->he_RU[3]);
5210 }
5211
5212 return rtap_len;
5213 }
5214
5215 /**
5216 * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
5217 * @rx_status: Pointer to rx_status.
5218 * @rtap_buf: buffer to which radiotap has to be updated
5219 * @rtap_len: radiotap length
5220 *
5221 * API to update the HE-MU-other fields in the radiotap header
5222 *
5223 * Return: length of rtap_len updated.
5224 */
5225 static unsigned int
5226 qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
5227 int8_t *rtap_buf, uint32_t rtap_len)
5228 {
5229 struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5230
5231 rtap_len = qdf_align(rtap_len, 2);
5232
5233 /*
5234 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
5235 * Enable all "known" he-mu-other radiotap flags for now
5236 */
5237 if (!rx_user_status) {
5238 put_unaligned_le16(rx_status->he_per_user_1,
5239 &rtap_buf[rtap_len]);
5240 rtap_len += 2;
5241
5242 put_unaligned_le16(rx_status->he_per_user_2,
5243 &rtap_buf[rtap_len]);
5244 rtap_len += 2;
5245
5246 rtap_buf[rtap_len] = rx_status->he_per_user_position;
5247 rtap_len += 1;
5248
5249 rtap_buf[rtap_len] = rx_status->he_per_user_known;
5250 rtap_len += 1;
5251 } else {
5252 put_unaligned_le16(rx_user_status->he_per_user_1,
5253 &rtap_buf[rtap_len]);
5254 rtap_len += 2;
5255
5256 put_unaligned_le16(rx_user_status->he_per_user_2,
5257 &rtap_buf[rtap_len]);
5258 rtap_len += 2;
5259
5260 rtap_buf[rtap_len] = rx_user_status->he_per_user_position;
5261 rtap_len += 1;
5262
5263 rtap_buf[rtap_len] = rx_user_status->he_per_user_known;
5264 rtap_len += 1;
5265 }
5266
5267 return rtap_len;
5268 }
5269
5270 /**
5271 * qdf_nbuf_update_radiotap_usig_flags() - Update radiotap header with USIG data
5272 * from rx_status
5273 * @rx_status: Pointer to rx_status.
5274 * @rtap_buf: buffer to which radiotap has to be updated
5275 * @rtap_len: radiotap length
5276 *
5277 * API to update the U-SIG (11be) fields in the radiotap header
5278 *
5279 * Return: length of rtap_len updated.
5280 */
5281 static unsigned int
5282 qdf_nbuf_update_radiotap_usig_flags(struct mon_rx_status *rx_status,
5283 int8_t *rtap_buf, uint32_t rtap_len)
5284 {
5285 /*
5286 * IEEE80211_RADIOTAP_USIG:
5287 * u32, u32, u32
5288 */
5289 rtap_len = qdf_align(rtap_len, 4);
5290
5291 put_unaligned_le32(rx_status->usig_common, &rtap_buf[rtap_len]);
5292 rtap_len += 4;
5293
5294 put_unaligned_le32(rx_status->usig_value, &rtap_buf[rtap_len]);
5295 rtap_len += 4;
5296
5297 put_unaligned_le32(rx_status->usig_mask, &rtap_buf[rtap_len]);
5298 rtap_len += 4;
5299
5300 qdf_rl_debug("U-SIG data %x %x %x",
5301 rx_status->usig_common, rx_status->usig_value,
5302 rx_status->usig_mask);
5303
5304 return rtap_len;
5305 }
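/*
 * Note on the qdf_align() calls used by the radiotap updaters above and
 * below: radiotap requires each field to be naturally aligned relative to
 * the start of the radiotap header. For example, if rtap_len is 26 when a
 * u32 field such as usig_common is about to be written, qdf_align(26, 4)
 * bumps it to 28 first, and the three U-SIG u32 words then occupy bytes
 * 28..39 of the header.
 */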
5306
5307 /**
5308 * qdf_nbuf_update_radiotap_eht_flags() - Update radiotap header with EHT data
5309 * from rx_status
5310 * @rx_status: Pointer to rx_status.
5311 * @rtap_buf: buffer to which radiotap has to be updated
5312 * @rtap_len: radiotap length
5313 *
5314 * API to update the Extremely High Throughput (11be) fields in the radiotap header
5315 *
5316 * Return: length of rtap_len updated.
5317 */
5318 static unsigned int
5319 qdf_nbuf_update_radiotap_eht_flags(struct mon_rx_status *rx_status,
5320 int8_t *rtap_buf, uint32_t rtap_len)
5321 {
5322 struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5323 /*
5324 * IEEE80211_RADIOTAP_EHT:
5325 * u32, u32, u32, u32, u32, u32, u32, u16, [u32, u32, u32]
5326 */
5327 rtap_len = qdf_align(rtap_len, 4);
5328
5329 if (!rx_user_status) {
5330 put_unaligned_le32(rx_status->eht_known, &rtap_buf[rtap_len]);
5331 rtap_len += 4;
5332
5333 put_unaligned_le32(rx_status->eht_data[0], &rtap_buf[rtap_len]);
5334 rtap_len += 4;
5335
5336 put_unaligned_le32(rx_status->eht_data[1], &rtap_buf[rtap_len]);
5337 rtap_len += 4;
5338 } else {
5339 put_unaligned_le32(rx_status->eht_known |
5340 rx_user_status->eht_known,
5341 &rtap_buf[rtap_len]);
5342 rtap_len += 4;
5343
5344 put_unaligned_le32(rx_status->eht_data[0] |
5345 rx_user_status->eht_data[0],
5346 &rtap_buf[rtap_len]);
5347 rtap_len += 4;
5348
5349 put_unaligned_le32(rx_status->eht_data[1] |
5350 rx_user_status->eht_data[1],
5351 &rtap_buf[rtap_len]);
5352 rtap_len += 4;
5353 }
5354
5355 put_unaligned_le32(rx_status->eht_data[2], &rtap_buf[rtap_len]);
5356 rtap_len += 4;
5357
5358 put_unaligned_le32(rx_status->eht_data[3], &rtap_buf[rtap_len]);
5359 rtap_len += 4;
5360
5361 put_unaligned_le32(rx_status->eht_data[4], &rtap_buf[rtap_len]);
5362 rtap_len += 4;
5363
5364 put_unaligned_le32(rx_status->eht_data[5], &rtap_buf[rtap_len]);
5365 rtap_len += 4;
5366
5367 if (!rx_user_status) {
5368 qdf_rl_debug("EHT data %x %x %x %x %x %x %x",
5369 rx_status->eht_known, rx_status->eht_data[0],
5370 rx_status->eht_data[1], rx_status->eht_data[2],
5371 rx_status->eht_data[3], rx_status->eht_data[4],
5372 rx_status->eht_data[5]);
5373 } else {
5374 put_unaligned_le32(rx_user_status->eht_user_info, &rtap_buf[rtap_len]);
5375 rtap_len += 4;
5376
5377 qdf_rl_debug("EHT data %x %x %x %x %x %x %x",
5378 rx_status->eht_known | rx_user_status->eht_known,
5379 rx_status->eht_data[0] |
5380 rx_user_status->eht_data[0],
5381 rx_status->eht_data[1] |
5382 rx_user_status->eht_data[1],
5383 rx_status->eht_data[2], rx_status->eht_data[3],
5384 rx_status->eht_data[4], rx_status->eht_data[5]);
5385 }
5386
5387 return rtap_len;
5388 }
5389
5390 #define IEEE80211_RADIOTAP_TX_STATUS 0
5391 #define IEEE80211_RADIOTAP_RETRY_COUNT 1
5392 #define IEEE80211_RADIOTAP_EXTENSION2 2
5393 uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
5394
5395 /**
5396 * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
5397 * @rx_status: Pointer to rx_status.
5398 * @rtap_buf: Buf to which AMPDU info has to be updated.
5399 * @rtap_len: Current length of radiotap buffer
5400 *
5401 * Return: Length of radiotap after AMPDU flags updated.
5402 */
5403 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
5404 struct mon_rx_status *rx_status,
5405 uint8_t *rtap_buf,
5406 uint32_t rtap_len)
5407 {
5408 /*
5409 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
5410 * First 32 bits of AMPDU represents the reference number
5411 */
5412
5413 uint32_t ampdu_reference_num = rx_status->ppdu_id;
5414 uint16_t ampdu_flags = 0;
5415 uint16_t ampdu_reserved_flags = 0;
5416
5417 rtap_len = qdf_align(rtap_len, 4);
5418
5419 put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
5420 rtap_len += 4;
5421 put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
5422 rtap_len += 2;
5423 put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
5424 rtap_len += 2;
5425
5426 return rtap_len;
5427 }
5428
5429 #ifdef DP_MON_RSSI_IN_DBM
5430 #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
5431 (rx_status->rssi_comb)
5432 #else
5433 #ifdef QCA_RSSI_DB2DBM
5434 #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
5435 (((rx_status)->rssi_dbm_conv_support) ? \
5436 ((rx_status)->rssi_comb + (rx_status)->rssi_offset) :\
5437 ((rx_status)->rssi_comb + (rx_status)->chan_noise_floor))
5438 #else
5439 #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
5440 (rx_status->rssi_comb + rx_status->chan_noise_floor)
5441 #endif
5442 #endif
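/*
 * Worked example for QDF_MON_STATUS_GET_RSSI_IN_DBM(): with the default
 * (non-DP_MON_RSSI_IN_DBM, non-QCA_RSSI_DB2DBM) definition, rssi_comb = 61
 * above a chan_noise_floor of -96 dBm yields 61 + (-96) = -35 dBm as the
 * signal strength reported in the radiotap header.
 */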
5443
5444 /**
5445 * qdf_nbuf_update_radiotap_tx_flags() - Update radiotap header tx flags
5446 * @rx_status: Pointer to rx_status.
5447  * @rtap_buf: Buffer to which the tx info is added.
5448  * @rtap_len: Current length of the radiotap buffer
5449  *
5450  * Return: Length of the radiotap buffer after the tx flags are updated.
5451 */
5452 static unsigned int qdf_nbuf_update_radiotap_tx_flags(
5453 struct mon_rx_status *rx_status,
5454 uint8_t *rtap_buf,
5455 uint32_t rtap_len)
5456 {
5457 /*
5458 * IEEE80211_RADIOTAP_TX_FLAGS u16
5459 */
5460
5461 uint16_t tx_flags = 0;
5462
5463 rtap_len = qdf_align(rtap_len, 2);
5464
5465 switch (rx_status->tx_status) {
5466 case RADIOTAP_TX_STATUS_FAIL:
5467 tx_flags |= IEEE80211_RADIOTAP_F_TX_FAIL;
5468 break;
5469 case RADIOTAP_TX_STATUS_NOACK:
5470 tx_flags |= IEEE80211_RADIOTAP_F_TX_NOACK;
5471 break;
5472 }
5473 put_unaligned_le16(tx_flags, &rtap_buf[rtap_len]);
5474 rtap_len += 2;
5475
5476 return rtap_len;
5477 }
5478
5479 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
5480 qdf_nbuf_t nbuf, uint32_t headroom_sz)
5481 {
5482 uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
5483 struct ieee80211_radiotap_header *rthdr =
5484 (struct ieee80211_radiotap_header *)rtap_buf;
5485 uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
5486 uint32_t rtap_len = rtap_hdr_len;
5487 uint8_t length = rtap_len;
5488 struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
5489 struct qdf_radiotap_ext2 *rtap_ext2;
5490 struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5491
5492 /* per user info */
5493 qdf_le32_t *it_present;
5494 uint32_t it_present_val;
5495 bool radiotap_ext1_hdr_present = false;
5496
5497 it_present = &rthdr->it_present;
5498
5499 /* Adding Extended Header space */
5500 if (rx_status->add_rtap_ext || rx_status->add_rtap_ext2 ||
5501 rx_status->usig_flags || rx_status->eht_flags) {
5502 rtap_hdr_len += RADIOTAP_HEADER_EXT_LEN;
5503 rtap_len = rtap_hdr_len;
5504 radiotap_ext1_hdr_present = true;
5505 }
5506
5507 length = rtap_len;
5508
5509 	/* IEEE80211_RADIOTAP_TSFT __le64 microseconds */
5510 it_present_val = (1 << IEEE80211_RADIOTAP_TSFT);
5511 put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
5512 rtap_len += 8;
5513
5514 /* IEEE80211_RADIOTAP_FLAGS u8 */
5515 it_present_val |= (1 << IEEE80211_RADIOTAP_FLAGS);
5516
5517 if (rx_status->rs_fcs_err)
5518 rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
5519
5520 rtap_buf[rtap_len] = rx_status->rtap_flags;
5521 rtap_len += 1;
5522
5523 /* IEEE80211_RADIOTAP_RATE u8 500kb/s */
5524 if (!rx_status->ht_flags && !rx_status->vht_flags &&
5525 !rx_status->he_flags && !rx_status->eht_flags) {
5526 it_present_val |= (1 << IEEE80211_RADIOTAP_RATE);
5527 rtap_buf[rtap_len] = rx_status->rate;
5528 } else
5529 rtap_buf[rtap_len] = 0;
5530 rtap_len += 1;
5531
5532 /* IEEE80211_RADIOTAP_CHANNEL 2 x __le16 MHz, bitmap */
5533 it_present_val |= (1 << IEEE80211_RADIOTAP_CHANNEL);
5534 put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
5535 rtap_len += 2;
5536 /* Channel flags. */
5537 if (rx_status->chan_freq > CHANNEL_FREQ_5150)
5538 rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
5539 else
5540 rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
5541 if (rx_status->cck_flag)
5542 rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
5543 if (rx_status->ofdm_flag)
5544 rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
5545 put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
5546 rtap_len += 2;
5547
5548 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8 decibels from one milliwatt
5549 * (dBm)
5550 */
5551 it_present_val |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
5552 /*
5553 	 * rssi_comb is in dB and needs to be converted to dBm,
5554 	 * normalized to a noise floor of -96 dBm.
5555 */
5556 rtap_buf[rtap_len] = QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status);
5557 rtap_len += 1;
5558
5559 /* RX signal noise floor */
5560 it_present_val |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
5561 rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
5562 rtap_len += 1;
5563
5564 /* IEEE80211_RADIOTAP_ANTENNA u8 antenna index */
5565 it_present_val |= (1 << IEEE80211_RADIOTAP_ANTENNA);
5566 rtap_buf[rtap_len] = rx_status->nr_ant;
5567 rtap_len += 1;
5568
5569 if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
5570 qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
5571 return 0;
5572 }
5573
5574 	/* update tx flags for pkt capture */
5575 if (rx_status->add_rtap_ext) {
5576 it_present_val |=
5577 cpu_to_le32(1 << IEEE80211_RADIOTAP_TX_FLAGS);
5578 rtap_len = qdf_nbuf_update_radiotap_tx_flags(rx_status,
5579 rtap_buf,
5580 rtap_len);
5581
5582 if ((rtap_len - length) > RADIOTAP_TX_FLAGS_LEN) {
5583 qdf_print("length is greater than RADIOTAP_TX_FLAGS_LEN");
5584 return 0;
5585 }
5586 }
5587
5588 if (rx_status->ht_flags) {
5589 length = rtap_len;
5590 		/* IEEE80211_RADIOTAP_MCS u8, u8, u8 */
5591 it_present_val |= (1 << IEEE80211_RADIOTAP_MCS);
5592 rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
5593 IEEE80211_RADIOTAP_MCS_HAVE_MCS |
5594 IEEE80211_RADIOTAP_MCS_HAVE_GI;
5595 rtap_len += 1;
5596
5597 if (rx_status->sgi)
5598 rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
5599 if (rx_status->bw)
5600 rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
5601 else
5602 rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
5603 rtap_len += 1;
5604
5605 rtap_buf[rtap_len] = rx_status->ht_mcs;
5606 rtap_len += 1;
5607
5608 if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
5609 qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
5610 return 0;
5611 }
5612 }
5613
5614 if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
5615 /* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
5616 it_present_val |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
5617 rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
5618 rtap_buf,
5619 rtap_len);
5620 }
5621
5622 if (rx_status->vht_flags) {
5623 length = rtap_len;
5624 /* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
5625 it_present_val |= (1 << IEEE80211_RADIOTAP_VHT);
5626 rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
5627 rtap_buf,
5628 rtap_len);
5629
5630 if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
5631 qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
5632 return 0;
5633 }
5634 }
5635
5636 if (rx_status->he_flags) {
5637 length = rtap_len;
5638 /* IEEE80211_RADIOTAP_HE */
5639 it_present_val |= (1 << IEEE80211_RADIOTAP_HE);
5640 rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
5641 rtap_buf,
5642 rtap_len);
5643
5644 if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
5645 qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
5646 return 0;
5647 }
5648 }
5649
5650 if (rx_status->he_mu_flags) {
5651 length = rtap_len;
5652 /* IEEE80211_RADIOTAP_HE-MU */
5653 it_present_val |= (1 << IEEE80211_RADIOTAP_HE_MU);
5654 rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
5655 rtap_buf,
5656 rtap_len);
5657
5658 if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
5659 qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
5660 return 0;
5661 }
5662 }
5663
5664 if (rx_status->he_mu_other_flags) {
5665 length = rtap_len;
5666 /* IEEE80211_RADIOTAP_HE-MU-OTHER */
5667 it_present_val |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
5668 rtap_len =
5669 qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
5670 rtap_buf,
5671 rtap_len);
5672
5673 if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
5674 qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
5675 return 0;
5676 }
5677 }
5678
5679 rtap_len = qdf_align(rtap_len, 2);
5680 /*
5681 * Radiotap Vendor Namespace
5682 */
5683 it_present_val |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
5684 radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
5685 (rtap_buf + rtap_len);
5686 /*
5687 * Copy Atheros OUI - 3 bytes (4th byte is 0)
5688 */
5689 qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
5690 /*
5691 * Name space selector = 0
5692 	 * We will only have one namespace for now
5693 */
5694 radiotap_vendor_ns_ath->hdr.selector = 0;
5695 radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
5696 sizeof(*radiotap_vendor_ns_ath) -
5697 sizeof(radiotap_vendor_ns_ath->hdr));
5698 radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
5699 radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
5700 radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
5701 radiotap_vendor_ns_ath->ppdu_start_timestamp =
5702 cpu_to_le32(rx_status->ppdu_timestamp);
5703 rtap_len += sizeof(*radiotap_vendor_ns_ath);
5704
5705 /* Move to next it_present */
5706 if (radiotap_ext1_hdr_present) {
5707 it_present_val |= (1 << IEEE80211_RADIOTAP_EXT);
5708 put_unaligned_le32(it_present_val, it_present);
5709 it_present_val = 0;
5710 it_present++;
5711 }
5712
5713 /* Add Extension to Radiotap Header & corresponding data */
5714 if (rx_status->add_rtap_ext) {
5715 it_present_val |= (1 << IEEE80211_RADIOTAP_TX_STATUS);
5716 it_present_val |= (1 << IEEE80211_RADIOTAP_RETRY_COUNT);
5717
5718 rtap_buf[rtap_len] = rx_status->tx_status;
5719 rtap_len += 1;
5720 rtap_buf[rtap_len] = rx_status->tx_retry_cnt;
5721 rtap_len += 1;
5722 }
5723
5724 /* Add Extension2 to Radiotap Header */
5725 if (rx_status->add_rtap_ext2) {
5726 it_present_val |= (1 << IEEE80211_RADIOTAP_EXTENSION2);
5727
5728 rtap_ext2 = (struct qdf_radiotap_ext2 *)(rtap_buf + rtap_len);
5729 rtap_ext2->ppdu_id = rx_status->ppdu_id;
5730 rtap_ext2->prev_ppdu_id = rx_status->prev_ppdu_id;
5731 if (!rx_user_status) {
5732 rtap_ext2->tid = rx_status->tid;
5733 rtap_ext2->start_seq = rx_status->start_seq;
5734 qdf_mem_copy(rtap_ext2->ba_bitmap,
5735 rx_status->ba_bitmap,
5736 8 * (sizeof(uint32_t)));
5737 } else {
5738 uint8_t ba_bitmap_sz = rx_user_status->ba_bitmap_sz;
5739
5740 /* set default bitmap sz if not set */
5741 ba_bitmap_sz = ba_bitmap_sz ? ba_bitmap_sz : 8;
5742 rtap_ext2->tid = rx_user_status->tid;
5743 rtap_ext2->start_seq = rx_user_status->start_seq;
5744 qdf_mem_copy(rtap_ext2->ba_bitmap,
5745 rx_user_status->ba_bitmap,
5746 ba_bitmap_sz * (sizeof(uint32_t)));
5747 }
5748
5749 rtap_len += sizeof(*rtap_ext2);
5750 }
5751
5752 if (rx_status->usig_flags) {
5753 length = rtap_len;
5754 /* IEEE80211_RADIOTAP_USIG */
5755 it_present_val |= (1 << IEEE80211_RADIOTAP_EXT1_USIG);
5756 rtap_len = qdf_nbuf_update_radiotap_usig_flags(rx_status,
5757 rtap_buf,
5758 rtap_len);
5759
5760 if ((rtap_len - length) > RADIOTAP_EHT_FLAGS_LEN) {
5761 qdf_print("length is greater than RADIOTAP_EHT_FLAGS_LEN");
5762 return 0;
5763 }
5764 }
5765
5766 if (rx_status->eht_flags) {
5767 length = rtap_len;
5768 /* IEEE80211_RADIOTAP_EHT */
5769 it_present_val |= (1 << IEEE80211_RADIOTAP_EXT1_EHT);
5770 rtap_len = qdf_nbuf_update_radiotap_eht_flags(rx_status,
5771 rtap_buf,
5772 rtap_len);
5773
5774 if ((rtap_len - length) > RADIOTAP_EHT_FLAGS_LEN) {
5775 qdf_print("length is greater than RADIOTAP_EHT_FLAGS_LEN");
5776 return 0;
5777 }
5778 }
5779
5780 put_unaligned_le32(it_present_val, it_present);
5781 rthdr->it_len = cpu_to_le16(rtap_len);
5782
5783 if (headroom_sz < rtap_len) {
5784 qdf_debug("DEBUG: Not enough space to update radiotap");
5785 return 0;
5786 }
5787
5788 qdf_nbuf_push_head(nbuf, rtap_len);
5789 qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
5790 return rtap_len;
5791 }
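
/*
 * Illustrative usage sketch, not part of the driver: a monitor-mode rx
 * handler (names such as mon_nbuf and mon_rx_status are assumptions) is
 * expected to have reserved headroom on the nbuf and filled in the
 * mon_rx_status before pushing the radiotap header.
 *
 *	uint32_t headroom = qdf_nbuf_headroom(mon_nbuf);
 *	unsigned int rtap_len;
 *
 *	rtap_len = qdf_nbuf_update_radiotap(&mon_rx_status, mon_nbuf,
 *					    headroom);
 *	if (!rtap_len) {
 *		// radiotap could not be added (e.g. not enough headroom)
 *		qdf_nbuf_free(mon_nbuf);
 *		return;
 *	}
 */
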
5792 #else
5793 static unsigned int qdf_nbuf_update_radiotap_vht_flags(
5794 struct mon_rx_status *rx_status,
5795 int8_t *rtap_buf,
5796 uint32_t rtap_len)
5797 {
5798 qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5799 return 0;
5800 }
5801
5802 unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
5803 int8_t *rtap_buf, uint32_t rtap_len)
5804 {
5805 qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5806 return 0;
5807 }
5808
5809 static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
5810 struct mon_rx_status *rx_status,
5811 uint8_t *rtap_buf,
5812 uint32_t rtap_len)
5813 {
5814 qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5815 return 0;
5816 }
5817
5818 unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
5819 qdf_nbuf_t nbuf, uint32_t headroom_sz)
5820 {
5821 qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
5822 return 0;
5823 }
5824 #endif
5825 qdf_export_symbol(qdf_nbuf_update_radiotap);
5826
5827 void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
5828 {
5829 nbuf_free_cb = cb_func_ptr;
5830 }
5831
5832 qdf_export_symbol(__qdf_nbuf_reg_free_cb);
5833
5834 void qdf_nbuf_classify_pkt(struct sk_buff *skb)
5835 {
5836 struct ethhdr *eh = (struct ethhdr *)skb->data;
5837
5838 /* check destination mac address is broadcast/multicast */
5839 if (is_broadcast_ether_addr((uint8_t *)eh))
5840 QDF_NBUF_CB_SET_BCAST(skb);
5841 else if (is_multicast_ether_addr((uint8_t *)eh))
5842 QDF_NBUF_CB_SET_MCAST(skb);
5843
5844 if (qdf_nbuf_is_ipv4_arp_pkt(skb))
5845 QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5846 QDF_NBUF_CB_PACKET_TYPE_ARP;
5847 else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
5848 QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5849 QDF_NBUF_CB_PACKET_TYPE_DHCP;
5850 else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
5851 QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5852 QDF_NBUF_CB_PACKET_TYPE_EAPOL;
5853 else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
5854 QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5855 QDF_NBUF_CB_PACKET_TYPE_WAPI;
5856 }
5857 qdf_export_symbol(qdf_nbuf_classify_pkt);
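
/*
 * Illustrative sketch of a caller (not part of this file): classify the
 * frame once on the tx path and branch on the cached packet type.
 *
 *	qdf_nbuf_classify_pkt(skb);
 *	if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) ==
 *	    QDF_NBUF_CB_PACKET_TYPE_EAPOL) {
 *		// e.g. steer EAPOL frames to a high-priority tx queue
 *	}
 */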
5858
5859 void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
5860 {
5861 qdf_nbuf_users_set(&nbuf->users, 1);
5862 nbuf->data = nbuf->head + NET_SKB_PAD;
5863 skb_reset_tail_pointer(nbuf);
5864 }
5865 qdf_export_symbol(__qdf_nbuf_init);
5866
5867 #ifdef WLAN_FEATURE_FASTPATH
5868 void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
5869 {
5870 qdf_nbuf_users_set(&nbuf->users, 1);
5871 skb_reset_tail_pointer(nbuf);
5872 }
5873 qdf_export_symbol(qdf_nbuf_init_fast);
5874 #endif /* WLAN_FEATURE_FASTPATH */
5875
5876
5877 #ifdef QDF_NBUF_GLOBAL_COUNT
5878 void __qdf_nbuf_mod_init(void)
5879 {
5880 is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
5881 qdf_atomic_init(&nbuf_count);
5882 qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
5883 }
5884
5885 void __qdf_nbuf_mod_exit(void)
5886 {
5887 }
5888 #endif
5889
5890 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
5891 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
5892 int offset)
5893 {
5894 unsigned int frag_offset;
5895 skb_frag_t *frag;
5896
5897 if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
5898 return QDF_STATUS_E_FAILURE;
5899
5900 frag = &skb_shinfo(nbuf)->frags[idx];
5901 frag_offset = skb_frag_off(frag);
5902
5903 frag_offset += offset;
5904 skb_frag_off_set(frag, frag_offset);
5905
5906 __qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
5907
5908 return QDF_STATUS_SUCCESS;
5909 }
5910
5911 #else
5912 QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
5913 int offset)
5914 {
5915 uint16_t frag_offset;
5916 skb_frag_t *frag;
5917
5918 if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
5919 return QDF_STATUS_E_FAILURE;
5920
5921 frag = &skb_shinfo(nbuf)->frags[idx];
5922 frag_offset = frag->page_offset;
5923
5924 frag_offset += offset;
5925 frag->page_offset = frag_offset;
5926
5927 __qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
5928
5929 return QDF_STATUS_SUCCESS;
5930 }
5931 #endif
5932
5933 qdf_export_symbol(__qdf_nbuf_move_frag_page_offset);
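
/*
 * Illustrative sketch (the 4-byte value is an assumption): a positive
 * offset strips that many bytes from the front of the chosen frag and
 * shrinks its length by the same amount, e.g. to drop a 4-byte pad that
 * precedes the payload in frag 0.
 *
 *	if (qdf_nbuf_move_frag_page_offset(nbuf, 0, 4) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 */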
5934
5935 void __qdf_nbuf_remove_frag(__qdf_nbuf_t nbuf,
5936 uint16_t idx,
5937 uint16_t truesize)
5938 {
5939 struct page *page;
5940 uint16_t frag_len;
5941
5942 page = skb_frag_page(&skb_shinfo(nbuf)->frags[idx]);
5943
5944 if (qdf_unlikely(!page))
5945 return;
5946
5947 frag_len = qdf_nbuf_get_frag_size_by_idx(nbuf, idx);
5948 put_page(page);
5949 nbuf->len -= frag_len;
5950 nbuf->data_len -= frag_len;
5951 nbuf->truesize -= truesize;
5952 skb_shinfo(nbuf)->nr_frags--;
5953 }
5954
5955 qdf_export_symbol(__qdf_nbuf_remove_frag);
5956
5957 void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
5958 int offset, int frag_len,
5959 unsigned int truesize, bool take_frag_ref)
5960 {
5961 struct page *page;
5962 int frag_offset;
5963 uint8_t nr_frag;
5964
5965 nr_frag = __qdf_nbuf_get_nr_frags(nbuf);
5966 qdf_assert_always(nr_frag < QDF_NBUF_MAX_FRAGS);
5967
5968 page = virt_to_head_page(buf);
5969 frag_offset = buf - page_address(page);
5970
5971 skb_add_rx_frag(nbuf, nr_frag, page,
5972 (frag_offset + offset),
5973 frag_len, truesize);
5974
5975 if (unlikely(take_frag_ref)) {
5976 qdf_frag_count_inc(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);
5977 skb_frag_ref(nbuf, nr_frag);
5978 }
5979 }
5980
5981 qdf_export_symbol(__qdf_nbuf_add_rx_frag);
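
/*
 * Illustrative sketch (rx_buf_virt, msdu_len and buf_truesize are
 * assumptions): attach a received buffer as a page fragment instead of
 * copying it into the nbuf's linear area; take_frag_ref is false when the
 * caller hands its own page reference over to the nbuf.
 *
 *	qdf_nbuf_add_rx_frag(rx_buf_virt, nbuf, 0, msdu_len,
 *			     buf_truesize, false);
 */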
5982
5983 #ifdef NBUF_FRAG_MEMORY_DEBUG
5984
5985 QDF_STATUS qdf_nbuf_move_frag_page_offset_debug(qdf_nbuf_t nbuf, uint8_t idx,
5986 int offset, const char *func,
5987 uint32_t line)
5988 {
5989 QDF_STATUS result;
5990 qdf_frag_t p_fragp, n_fragp;
5991
5992 p_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
5993 result = __qdf_nbuf_move_frag_page_offset(nbuf, idx, offset);
5994
5995 if (qdf_likely(is_initial_mem_debug_disabled))
5996 return result;
5997
5998 n_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
5999
6000 /*
6001 * Update frag address in frag debug tracker
6002 * when frag offset is successfully changed in skb
6003 */
6004 if (result == QDF_STATUS_SUCCESS)
6005 qdf_frag_debug_update_addr(p_fragp, n_fragp, func, line);
6006
6007 return result;
6008 }
6009
6010 qdf_export_symbol(qdf_nbuf_move_frag_page_offset_debug);
6011
6012 void qdf_nbuf_add_rx_frag_debug(qdf_frag_t buf, qdf_nbuf_t nbuf,
6013 int offset, int frag_len,
6014 unsigned int truesize, bool take_frag_ref,
6015 const char *func, uint32_t line)
6016 {
6017 qdf_frag_t fragp;
6018 uint32_t num_nr_frags;
6019
6020 __qdf_nbuf_add_rx_frag(buf, nbuf, offset,
6021 frag_len, truesize, take_frag_ref);
6022
6023 if (qdf_likely(is_initial_mem_debug_disabled))
6024 return;
6025
6026 num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
6027
6028 qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
6029
6030 fragp = qdf_nbuf_get_frag_addr(nbuf, num_nr_frags - 1);
6031
6032 /* Update frag address in frag debug tracking table */
6033 if (fragp != buf && !take_frag_ref)
6034 qdf_frag_debug_update_addr(buf, fragp, func, line);
6035
6036 /* Update frag refcount in frag debug tracking table */
6037 qdf_frag_debug_refcount_inc(fragp, func, line);
6038 }
6039
6040 qdf_export_symbol(qdf_nbuf_add_rx_frag_debug);
6041
6042 void qdf_net_buf_debug_acquire_frag(qdf_nbuf_t buf, const char *func,
6043 uint32_t line)
6044 {
6045 uint32_t num_nr_frags;
6046 uint32_t idx = 0;
6047 qdf_nbuf_t ext_list;
6048 qdf_frag_t p_frag;
6049
6050 if (qdf_likely(is_initial_mem_debug_disabled))
6051 return;
6052
6053 if (qdf_unlikely(!buf))
6054 return;
6055
6056 /* Take care to update the refcount in the debug entries for frags */
6057 num_nr_frags = qdf_nbuf_get_nr_frags(buf);
6058
6059 qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
6060
6061 while (idx < num_nr_frags) {
6062 p_frag = qdf_nbuf_get_frag_addr(buf, idx);
6063 if (qdf_likely(p_frag))
6064 qdf_frag_debug_refcount_inc(p_frag, func, line);
6065 idx++;
6066 }
6067
6068 /*
6069 * Take care to update the refcount in the debug entries for the
6070 * frags attached to frag_list
6071 */
6072 ext_list = qdf_nbuf_get_ext_list(buf);
6073 while (ext_list) {
6074 idx = 0;
6075 num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
6076
6077 qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
6078
6079 while (idx < num_nr_frags) {
6080 p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
6081 if (qdf_likely(p_frag))
6082 qdf_frag_debug_refcount_inc(p_frag, func, line);
6083 idx++;
6084 }
6085 ext_list = qdf_nbuf_queue_next(ext_list);
6086 }
6087 }
6088
6089 qdf_export_symbol(qdf_net_buf_debug_acquire_frag);
6090
6091 void qdf_net_buf_debug_release_frag(qdf_nbuf_t buf, const char *func,
6092 uint32_t line)
6093 {
6094 uint32_t num_nr_frags;
6095 qdf_nbuf_t ext_list;
6096 uint32_t idx = 0;
6097 qdf_frag_t p_frag;
6098
6099 if (qdf_likely(is_initial_mem_debug_disabled))
6100 return;
6101
6102 if (qdf_unlikely(!buf))
6103 return;
6104
6105 /*
6106 	 * Decrement the refcount of the frag debug nodes only when the last
6107 	 * user of the nbuf calls this API, so that the refcount is not
6108 	 * decremented on every call except the last one when the nbuf has
6109 	 * multiple users
6110 */
6111 if (qdf_nbuf_get_users(buf) > 1)
6112 return;
6113
6114 /* Take care to update the refcount in the debug entries for frags */
6115 num_nr_frags = qdf_nbuf_get_nr_frags(buf);
6116
6117 qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
6118
6119 while (idx < num_nr_frags) {
6120 p_frag = qdf_nbuf_get_frag_addr(buf, idx);
6121 if (qdf_likely(p_frag))
6122 qdf_frag_debug_refcount_dec(p_frag, func, line);
6123 idx++;
6124 }
6125
6126 /* Take care to update debug entries for frags attached to frag_list */
6127 ext_list = qdf_nbuf_get_ext_list(buf);
6128 while (ext_list) {
6129 if (qdf_nbuf_get_users(ext_list) == 1) {
6130 idx = 0;
6131 num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
6132 qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
6133 while (idx < num_nr_frags) {
6134 p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
6135 if (qdf_likely(p_frag))
6136 qdf_frag_debug_refcount_dec(p_frag,
6137 func, line);
6138 idx++;
6139 }
6140 }
6141 ext_list = qdf_nbuf_queue_next(ext_list);
6142 }
6143 }
6144
6145 qdf_export_symbol(qdf_net_buf_debug_release_frag);
6146
6147 QDF_STATUS
6148 qdf_nbuf_remove_frag_debug(qdf_nbuf_t nbuf,
6149 uint16_t idx,
6150 uint16_t truesize,
6151 const char *func,
6152 uint32_t line)
6153 {
6154 uint16_t num_frags;
6155 qdf_frag_t frag;
6156
6157 if (qdf_unlikely(!nbuf))
6158 return QDF_STATUS_E_INVAL;
6159
6160 num_frags = qdf_nbuf_get_nr_frags(nbuf);
6161 if (idx >= num_frags)
6162 return QDF_STATUS_E_INVAL;
6163
6164 if (qdf_likely(is_initial_mem_debug_disabled)) {
6165 __qdf_nbuf_remove_frag(nbuf, idx, truesize);
6166 return QDF_STATUS_SUCCESS;
6167 }
6168
6169 frag = qdf_nbuf_get_frag_addr(nbuf, idx);
6170 if (qdf_likely(frag))
6171 qdf_frag_debug_refcount_dec(frag, func, line);
6172
6173 __qdf_nbuf_remove_frag(nbuf, idx, truesize);
6174
6175 return QDF_STATUS_SUCCESS;
6176 }
6177
6178 qdf_export_symbol(qdf_nbuf_remove_frag_debug);
6179
6180 #endif /* NBUF_FRAG_MEMORY_DEBUG */
6181
6182 qdf_nbuf_t qdf_get_nbuf_valid_frag(qdf_nbuf_t nbuf)
6183 {
6184 qdf_nbuf_t last_nbuf;
6185 uint32_t num_frags;
6186
6187 if (qdf_unlikely(!nbuf))
6188 return NULL;
6189
6190 num_frags = qdf_nbuf_get_nr_frags(nbuf);
6191
6192 	/* Check whether the nbuf has room to hold another frag */
6193 if (num_frags < QDF_NBUF_MAX_FRAGS)
6194 return nbuf;
6195
6196 if (!__qdf_nbuf_has_fraglist(nbuf))
6197 return NULL;
6198
6199 last_nbuf = __qdf_nbuf_get_last_frag_list_nbuf(nbuf);
6200 if (qdf_unlikely(!last_nbuf))
6201 return NULL;
6202
6203 num_frags = qdf_nbuf_get_nr_frags(last_nbuf);
6204 if (num_frags < QDF_NBUF_MAX_FRAGS)
6205 return last_nbuf;
6206
6207 return NULL;
6208 }
6209
6210 qdf_export_symbol(qdf_get_nbuf_valid_frag);
6211
6212 QDF_STATUS
6213 qdf_nbuf_add_frag_debug(qdf_device_t osdev, qdf_frag_t buf,
6214 qdf_nbuf_t nbuf, int offset,
6215 int frag_len, unsigned int truesize,
6216 bool take_frag_ref, unsigned int minsize,
6217 const char *func, uint32_t line)
6218 {
6219 qdf_nbuf_t cur_nbuf;
6220 qdf_nbuf_t this_nbuf;
6221
6222 cur_nbuf = nbuf;
6223 this_nbuf = nbuf;
6224
6225 if (qdf_unlikely(!frag_len || !buf)) {
6226 qdf_nofl_err("%s : %d frag[ buf[%pK] len[%d]] not valid\n",
6227 func, line,
6228 buf, frag_len);
6229 return QDF_STATUS_E_INVAL;
6230 }
6231
6232 this_nbuf = qdf_get_nbuf_valid_frag(this_nbuf);
6233
6234 if (this_nbuf) {
6235 cur_nbuf = this_nbuf;
6236 } else {
6237 		/* allocate a dummy mpdu buffer with minsize bytes of headroom */
6238 this_nbuf = qdf_nbuf_alloc(osdev, minsize, minsize, 4, false);
6239 if (qdf_unlikely(!this_nbuf)) {
6240 qdf_nofl_err("%s : %d no memory to allocate\n",
6241 func, line);
6242 return QDF_STATUS_E_NOMEM;
6243 }
6244 }
6245
6246 qdf_nbuf_add_rx_frag(buf, this_nbuf, offset, frag_len, truesize,
6247 take_frag_ref);
6248
6249 if (this_nbuf != cur_nbuf) {
6250 /* add new skb to frag list */
6251 qdf_nbuf_append_ext_list(nbuf, this_nbuf,
6252 qdf_nbuf_len(this_nbuf));
6253 }
6254
6255 return QDF_STATUS_SUCCESS;
6256 }
6257
6258 qdf_export_symbol(qdf_nbuf_add_frag_debug);
6259
6260 #ifdef MEMORY_DEBUG
6261 void qdf_nbuf_acquire_track_lock(uint32_t index,
6262 unsigned long irq_flag)
6263 {
6264 spin_lock_irqsave(&g_qdf_net_buf_track_lock[index],
6265 irq_flag);
6266 }
6267
6268 void qdf_nbuf_release_track_lock(uint32_t index,
6269 unsigned long irq_flag)
6270 {
6271 spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[index],
6272 irq_flag);
6273 }
6274
6275 QDF_NBUF_TRACK *qdf_nbuf_get_track_tbl(uint32_t index)
6276 {
6277 return gp_qdf_net_buf_track_tbl[index];
6278 }
6279 #endif /* MEMORY_DEBUG */
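
/*
 * Illustrative sketch (MEMORY_DEBUG builds only; the p_next walk assumes
 * the QDF_NBUF_TRACK list layout used by the tracker earlier in this
 * file): a consumer takes the per-bucket lock, walks the tracked entries,
 * and releases the lock with the same flags.
 *
 *	unsigned long irq_flag = 0;
 *	QDF_NBUF_TRACK *node;
 *
 *	qdf_nbuf_acquire_track_lock(index, irq_flag);
 *	node = qdf_nbuf_get_track_tbl(index);
 *	while (node) {
 *		// inspect the tracked entry here
 *		node = node->p_next;
 *	}
 *	qdf_nbuf_release_track_lock(index, irq_flag);
 */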
6280
6281 #ifdef ENHANCED_OS_ABSTRACTION
6282 void qdf_nbuf_set_timestamp(qdf_nbuf_t buf)
6283 {
6284 __qdf_nbuf_set_timestamp(buf);
6285 }
6286
6287 qdf_export_symbol(qdf_nbuf_set_timestamp);
6288
6289 uint64_t qdf_nbuf_get_timestamp(qdf_nbuf_t buf)
6290 {
6291 return __qdf_nbuf_get_timestamp(buf);
6292 }
6293
6294 qdf_export_symbol(qdf_nbuf_get_timestamp);
6295
6296 uint64_t qdf_nbuf_get_timestamp_us(qdf_nbuf_t buf)
6297 {
6298 return __qdf_nbuf_get_timestamp_us(buf);
6299 }
6300
6301 qdf_export_symbol(qdf_nbuf_get_timestamp_us);
6302
6303 uint64_t qdf_nbuf_get_timedelta_us(qdf_nbuf_t buf)
6304 {
6305 return __qdf_nbuf_get_timedelta_us(buf);
6306 }
6307
6308 qdf_export_symbol(qdf_nbuf_get_timedelta_us);
6309
6310 uint64_t qdf_nbuf_get_timedelta_ms(qdf_nbuf_t buf)
6311 {
6312 return __qdf_nbuf_get_timedelta_ms(buf);
6313 }
6314
6315 qdf_export_symbol(qdf_nbuf_get_timedelta_ms);
6316
6317 qdf_ktime_t qdf_nbuf_net_timedelta(qdf_ktime_t t)
6318 {
6319 return __qdf_nbuf_net_timedelta(t);
6320 }
6321
6322 qdf_export_symbol(qdf_nbuf_net_timedelta);
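
/*
 * Illustrative sketch (ENHANCED_OS_ABSTRACTION builds; delay_ms is an
 * assumption): stamp an nbuf when it is queued for transmit and read the
 * elapsed time when the completion arrives.
 *
 *	qdf_nbuf_set_timestamp(nbuf);			// at enqueue
 *	...
 *	delay_ms = qdf_nbuf_get_timedelta_ms(nbuf);	// at tx completion
 */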
6323 #endif
6324