xref: /wlan-driver/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1*5113495bSYour Name /*
2*5113495bSYour Name  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3*5113495bSYour Name  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4*5113495bSYour Name  *
5*5113495bSYour Name  * Permission to use, copy, modify, and/or distribute this software for
6*5113495bSYour Name  * any purpose with or without fee is hereby granted, provided that the
7*5113495bSYour Name  * above copyright notice and this permission notice appear in all
8*5113495bSYour Name  * copies.
9*5113495bSYour Name  *
10*5113495bSYour Name  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11*5113495bSYour Name  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12*5113495bSYour Name  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13*5113495bSYour Name  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14*5113495bSYour Name  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15*5113495bSYour Name  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16*5113495bSYour Name  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17*5113495bSYour Name  * PERFORMANCE OF THIS SOFTWARE.
18*5113495bSYour Name  */
19*5113495bSYour Name 
20*5113495bSYour Name /**
21*5113495bSYour Name  * DOC: qdf_nbuf.c
22*5113495bSYour Name  * QCA driver framework(QDF) network buffer management APIs
23*5113495bSYour Name  */
24*5113495bSYour Name #include <linux/hashtable.h>
25*5113495bSYour Name #include <linux/kernel.h>
26*5113495bSYour Name #include <linux/version.h>
27*5113495bSYour Name #include <linux/skbuff.h>
28*5113495bSYour Name #include <linux/module.h>
29*5113495bSYour Name #include <linux/proc_fs.h>
30*5113495bSYour Name #include <linux/inetdevice.h>
31*5113495bSYour Name #include <qdf_atomic.h>
32*5113495bSYour Name #include <qdf_debugfs.h>
33*5113495bSYour Name #include <qdf_lock.h>
34*5113495bSYour Name #include <qdf_mem.h>
35*5113495bSYour Name #include <qdf_module.h>
36*5113495bSYour Name #include <qdf_nbuf.h>
37*5113495bSYour Name #include <qdf_status.h>
38*5113495bSYour Name #include "qdf_str.h"
39*5113495bSYour Name #include <qdf_trace.h>
40*5113495bSYour Name #include "qdf_tracker.h"
41*5113495bSYour Name #include <qdf_types.h>
42*5113495bSYour Name #include <net/ieee80211_radiotap.h>
43*5113495bSYour Name #include <pld_common.h>
44*5113495bSYour Name #include <qdf_crypto.h>
45*5113495bSYour Name #include <linux/igmp.h>
46*5113495bSYour Name #include <net/mld.h>
47*5113495bSYour Name 
48*5113495bSYour Name #if defined(FEATURE_TSO)
49*5113495bSYour Name #include <net/ipv6.h>
50*5113495bSYour Name #include <linux/ipv6.h>
51*5113495bSYour Name #include <linux/tcp.h>
52*5113495bSYour Name #include <linux/if_vlan.h>
53*5113495bSYour Name #include <linux/ip.h>
54*5113495bSYour Name #endif /* FEATURE_TSO */
55*5113495bSYour Name 
56*5113495bSYour Name #ifdef IPA_OFFLOAD
57*5113495bSYour Name #include <i_qdf_ipa_wdi3.h>
58*5113495bSYour Name #endif /* IPA_OFFLOAD */
59*5113495bSYour Name #include "qdf_ssr_driver_dump.h"
60*5113495bSYour Name 
61*5113495bSYour Name #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
62*5113495bSYour Name 
63*5113495bSYour Name #define qdf_nbuf_users_inc atomic_inc
64*5113495bSYour Name #define qdf_nbuf_users_dec atomic_dec
65*5113495bSYour Name #define qdf_nbuf_users_set atomic_set
66*5113495bSYour Name #define qdf_nbuf_users_read atomic_read
67*5113495bSYour Name #else
68*5113495bSYour Name #define qdf_nbuf_users_inc refcount_inc
69*5113495bSYour Name #define qdf_nbuf_users_dec refcount_dec
70*5113495bSYour Name #define qdf_nbuf_users_set refcount_set
71*5113495bSYour Name #define qdf_nbuf_users_read refcount_read
72*5113495bSYour Name #endif /* KERNEL_VERSION(4, 13, 0) */
73*5113495bSYour Name 
74*5113495bSYour Name #define IEEE80211_RADIOTAP_VHT_BW_20	0
75*5113495bSYour Name #define IEEE80211_RADIOTAP_VHT_BW_40	1
76*5113495bSYour Name #define IEEE80211_RADIOTAP_VHT_BW_80	2
77*5113495bSYour Name #define IEEE80211_RADIOTAP_VHT_BW_160	3
78*5113495bSYour Name 
79*5113495bSYour Name #define RADIOTAP_VHT_BW_20	0
80*5113495bSYour Name #define RADIOTAP_VHT_BW_40	1
81*5113495bSYour Name #define RADIOTAP_VHT_BW_80	4
82*5113495bSYour Name #define RADIOTAP_VHT_BW_160	11
83*5113495bSYour Name 
84*5113495bSYour Name /* tx status */
85*5113495bSYour Name #define RADIOTAP_TX_STATUS_FAIL		1
86*5113495bSYour Name #define RADIOTAP_TX_STATUS_NOACK	2
87*5113495bSYour Name 
88*5113495bSYour Name /* channel number to freq conversion */
89*5113495bSYour Name #define CHANNEL_NUM_14 14
90*5113495bSYour Name #define CHANNEL_NUM_15 15
91*5113495bSYour Name #define CHANNEL_NUM_27 27
92*5113495bSYour Name #define CHANNEL_NUM_35 35
93*5113495bSYour Name #define CHANNEL_NUM_182 182
94*5113495bSYour Name #define CHANNEL_NUM_197 197
95*5113495bSYour Name #define CHANNEL_FREQ_2484 2484
96*5113495bSYour Name #define CHANNEL_FREQ_2407 2407
97*5113495bSYour Name #define CHANNEL_FREQ_2512 2512
98*5113495bSYour Name #define CHANNEL_FREQ_5000 5000
99*5113495bSYour Name #define CHANNEL_FREQ_4000 4000
100*5113495bSYour Name #define CHANNEL_FREQ_5150 5150
101*5113495bSYour Name #define FREQ_MULTIPLIER_CONST_5MHZ 5
102*5113495bSYour Name #define FREQ_MULTIPLIER_CONST_20MHZ 20
103*5113495bSYour Name #define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
104*5113495bSYour Name #define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
105*5113495bSYour Name #define RADIOTAP_CCK_CHANNEL 0x0020
106*5113495bSYour Name #define RADIOTAP_OFDM_CHANNEL 0x0040
107*5113495bSYour Name 
108*5113495bSYour Name #ifdef FEATURE_NBUFF_REPLENISH_TIMER
109*5113495bSYour Name #include <qdf_mc_timer.h>
110*5113495bSYour Name 
/* Tracks consecutive nbuf allocation failures and drives the replenish
 * timer that reports a sustained failure condition.
 */
struct qdf_track_timer {
	qdf_mc_timer_t track_timer;	/* SW timer armed on alloc failure */
	qdf_atomic_t alloc_fail_cnt;	/* failures since last success */
};

/* Single module-wide tracker instance */
static struct qdf_track_timer alloc_track_timer;

#define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS  5000
#define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD  50
120*5113495bSYour Name #endif
121*5113495bSYour Name 
122*5113495bSYour Name #ifdef NBUF_MEMORY_DEBUG
123*5113495bSYour Name /* SMMU crash indication*/
124*5113495bSYour Name static qdf_atomic_t smmu_crashed;
125*5113495bSYour Name /* Number of nbuf not added to history*/
126*5113495bSYour Name unsigned long g_histroy_add_drop;
127*5113495bSYour Name #endif
128*5113495bSYour Name 
129*5113495bSYour Name /* Packet Counter */
130*5113495bSYour Name static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
131*5113495bSYour Name static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
132*5113495bSYour Name #ifdef QDF_NBUF_GLOBAL_COUNT
133*5113495bSYour Name #define NBUF_DEBUGFS_NAME      "nbuf_counters"
134*5113495bSYour Name static qdf_atomic_t nbuf_count;
135*5113495bSYour Name #endif
136*5113495bSYour Name 
137*5113495bSYour Name #if defined(NBUF_MEMORY_DEBUG) || defined(QDF_NBUF_GLOBAL_COUNT)
138*5113495bSYour Name static bool is_initial_mem_debug_disabled;
139*5113495bSYour Name #endif
140*5113495bSYour Name 
141*5113495bSYour Name /**
142*5113495bSYour Name  *  __qdf_nbuf_get_ip_offset() - Get IPV4/V6 header offset
143*5113495bSYour Name  * @data: Pointer to network data buffer
144*5113495bSYour Name  *
145*5113495bSYour Name  * Get the IP header offset in case of 8021Q and 8021AD
146*5113495bSYour Name  * tag is present in L2 header.
147*5113495bSYour Name  *
148*5113495bSYour Name  * Return: IP header offset
149*5113495bSYour Name  */
static inline uint8_t __qdf_nbuf_get_ip_offset(uint8_t *data)
{
	uint16_t etype;

	etype = *(uint16_t *)(data + QDF_NBUF_TRAC_ETH_TYPE_OFFSET);

	/* A single 802.1Q tag shifts the IP header by 4 bytes and a
	 * double 802.1AD tag by 8 bytes.
	 */
	if (unlikely(etype == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
		return QDF_NBUF_TRAC_VLAN_IP_OFFSET;

	if (unlikely(etype == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
		return QDF_NBUF_TRAC_DOUBLE_VLAN_IP_OFFSET;

	return QDF_NBUF_TRAC_IP_OFFSET;
}
164*5113495bSYour Name 
165*5113495bSYour Name /**
166*5113495bSYour Name  *  __qdf_nbuf_get_ether_type() - Get the ether type
167*5113495bSYour Name  * @data: Pointer to network data buffer
168*5113495bSYour Name  *
169*5113495bSYour Name  * Get the ether type in case of 8021Q and 8021AD tag
170*5113495bSYour Name  * is present in L2 header, e.g for the returned ether type
171*5113495bSYour Name  * value, if IPV4 data ether type 0x0800, return 0x0008.
172*5113495bSYour Name  *
173*5113495bSYour Name  * Return ether type.
174*5113495bSYour Name  */
__qdf_nbuf_get_ether_type(uint8_t * data)175*5113495bSYour Name static inline uint16_t __qdf_nbuf_get_ether_type(uint8_t *data)
176*5113495bSYour Name {
177*5113495bSYour Name 	uint16_t ether_type;
178*5113495bSYour Name 
179*5113495bSYour Name 	ether_type = *(uint16_t *)(data +
180*5113495bSYour Name 				   QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
181*5113495bSYour Name 
182*5113495bSYour Name 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
183*5113495bSYour Name 		ether_type = *(uint16_t *)(data +
184*5113495bSYour Name 				QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
185*5113495bSYour Name 	else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
186*5113495bSYour Name 		ether_type = *(uint16_t *)(data +
187*5113495bSYour Name 				QDF_NBUF_TRAC_DOUBLE_VLAN_ETH_TYPE_OFFSET);
188*5113495bSYour Name 
189*5113495bSYour Name 	return ether_type;
190*5113495bSYour Name }
191*5113495bSYour Name 
/*
 * Print a snapshot of the data/mgmt tx packet counters. Each printed
 * value is the difference between adjacent layer counters, i.e. the
 * number of packets currently resident at that layer.
 */
void qdf_nbuf_tx_desc_count_display(void)
{
	qdf_debug("Current Snapshot of the Driver:");
	qdf_debug("Data Packets:");
	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
	qdf_debug(" HTC %d  HIF %d CE %d TX_COMP %d",
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
	qdf_debug("Mgmt Packets:");
	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
}
qdf_export_symbol(qdf_nbuf_tx_desc_count_display);
232*5113495bSYour Name 
233*5113495bSYour Name /**
234*5113495bSYour Name  * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
235*5113495bSYour Name  * @packet_type   : packet type either mgmt/data
236*5113495bSYour Name  * @current_state : layer at which the packet currently present
237*5113495bSYour Name  *
238*5113495bSYour Name  * Return: none
239*5113495bSYour Name  */
static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
			uint8_t current_state)
{
	/* Bump the per-layer counter for the matching packet class;
	 * packet types other than mgmt/data tracking are ignored.
	 */
	if (packet_type == QDF_NBUF_TX_PKT_MGMT_TRACK)
		nbuf_tx_mgmt[current_state]++;
	else if (packet_type == QDF_NBUF_TX_PKT_DATA_TRACK)
		nbuf_tx_data[current_state]++;
}
254*5113495bSYour Name 
qdf_nbuf_tx_desc_count_clear(void)255*5113495bSYour Name void qdf_nbuf_tx_desc_count_clear(void)
256*5113495bSYour Name {
257*5113495bSYour Name 	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
258*5113495bSYour Name 	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
259*5113495bSYour Name }
260*5113495bSYour Name qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);
261*5113495bSYour Name 
void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
{
	/*
	 * Only Mgmt and Data packets are tracked; WMI messages such as
	 * scan commands are not.
	 */
	uint8_t track = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);

	if (track != QDF_NBUF_TX_PKT_DATA_TRACK &&
	    track != QDF_NBUF_TX_PKT_MGMT_TRACK)
		return;

	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
	qdf_nbuf_tx_desc_count_update(track, current_state);
}
qdf_export_symbol(qdf_nbuf_set_state);
281*5113495bSYour Name 
282*5113495bSYour Name #ifdef FEATURE_NBUFF_REPLENISH_TIMER
283*5113495bSYour Name /**
284*5113495bSYour Name  * __qdf_nbuf_start_replenish_timer() - Start alloc fail replenish timer
285*5113495bSYour Name  *
286*5113495bSYour Name  * This function starts the alloc fail replenish timer.
287*5113495bSYour Name  *
288*5113495bSYour Name  * Return: void
289*5113495bSYour Name  */
__qdf_nbuf_start_replenish_timer(void)290*5113495bSYour Name static inline void __qdf_nbuf_start_replenish_timer(void)
291*5113495bSYour Name {
292*5113495bSYour Name 	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
293*5113495bSYour Name 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
294*5113495bSYour Name 	    QDF_TIMER_STATE_RUNNING)
295*5113495bSYour Name 		qdf_mc_timer_start(&alloc_track_timer.track_timer,
296*5113495bSYour Name 				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
297*5113495bSYour Name }
298*5113495bSYour Name 
299*5113495bSYour Name /**
300*5113495bSYour Name  * __qdf_nbuf_stop_replenish_timer() - Stop alloc fail replenish timer
301*5113495bSYour Name  *
302*5113495bSYour Name  * This function stops the alloc fail replenish timer.
303*5113495bSYour Name  *
304*5113495bSYour Name  * Return: void
305*5113495bSYour Name  */
__qdf_nbuf_stop_replenish_timer(void)306*5113495bSYour Name static inline void __qdf_nbuf_stop_replenish_timer(void)
307*5113495bSYour Name {
308*5113495bSYour Name 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
309*5113495bSYour Name 		return;
310*5113495bSYour Name 
311*5113495bSYour Name 	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
312*5113495bSYour Name 	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
313*5113495bSYour Name 	    QDF_TIMER_STATE_RUNNING)
314*5113495bSYour Name 		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
315*5113495bSYour Name }
316*5113495bSYour Name 
317*5113495bSYour Name /**
318*5113495bSYour Name  * qdf_replenish_expire_handler() - Replenish expire handler
319*5113495bSYour Name  * @arg: unused callback argument
320*5113495bSYour Name  *
321*5113495bSYour Name  * This function triggers when the alloc fail replenish timer expires.
322*5113495bSYour Name  *
323*5113495bSYour Name  * Return: void
324*5113495bSYour Name  */
qdf_replenish_expire_handler(void * arg)325*5113495bSYour Name static void qdf_replenish_expire_handler(void *arg)
326*5113495bSYour Name {
327*5113495bSYour Name 	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
328*5113495bSYour Name 	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
329*5113495bSYour Name 		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
330*5113495bSYour Name 			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));
331*5113495bSYour Name 
332*5113495bSYour Name 		/* Error handling here */
333*5113495bSYour Name 	}
334*5113495bSYour Name }
335*5113495bSYour Name 
/* Initialize the SW timer used to flag sustained nbuf allocation
 * failures; expiry is handled by qdf_replenish_expire_handler().
 */
void __qdf_nbuf_init_replenish_timer(void)
{
	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
			  qdf_replenish_expire_handler, NULL);
}
341*5113495bSYour Name 
void __qdf_nbuf_deinit_replenish_timer(void)
{
	/* Stop first so the timer cannot fire while being destroyed */
	__qdf_nbuf_stop_replenish_timer();
	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
}
347*5113495bSYour Name 
/* Public wrapper: clear the failure count and stop the replenish timer */
void qdf_nbuf_stop_replenish_timer(void)
{
	__qdf_nbuf_stop_replenish_timer();
}
352*5113495bSYour Name #else
353*5113495bSYour Name 
/* FEATURE_NBUFF_REPLENISH_TIMER disabled: replenish tracking is a no-op */
static inline void __qdf_nbuf_start_replenish_timer(void) {}
static inline void __qdf_nbuf_stop_replenish_timer(void) {}
void qdf_nbuf_stop_replenish_timer(void)
{
}
359*5113495bSYour Name #endif
360*5113495bSYour Name 
361*5113495bSYour Name /* globals do not need to be initialized to NULL/0 */
362*5113495bSYour Name qdf_nbuf_trace_update_t qdf_trace_update_cb;
363*5113495bSYour Name qdf_nbuf_free_t nbuf_free_cb;
364*5113495bSYour Name 
365*5113495bSYour Name #ifdef QDF_NBUF_GLOBAL_COUNT
366*5113495bSYour Name 
/* Return the current global count of outstanding nbufs */
int __qdf_nbuf_count_get(void)
{
	return qdf_atomic_read(&nbuf_count);
}
qdf_export_symbol(__qdf_nbuf_count_get);
372*5113495bSYour Name 
void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
{
	qdf_nbuf_t cur;
	int num_nbuf = 1;

	if (qdf_likely(is_initial_mem_debug_disabled))
		return;

	/* Count the head nbuf plus every buffer chained on its frag_list */
	for (cur = qdf_nbuf_get_ext_list(nbuf); cur;
	     cur = qdf_nbuf_queue_next(cur))
		++num_nbuf;

	qdf_atomic_add(num_nbuf, &nbuf_count);
}
qdf_export_symbol(__qdf_nbuf_count_inc);
392*5113495bSYour Name 
void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
{
	qdf_nbuf_t cur;
	int num_nbuf;

	if (qdf_likely(is_initial_mem_debug_disabled))
		return;

	/* A buffer with other holders is not being released yet */
	if (qdf_nbuf_get_users(nbuf) > 1)
		return;

	num_nbuf = 1;

	/* Account for frag_list members that are also going away */
	for (cur = qdf_nbuf_get_ext_list(nbuf); cur;
	     cur = qdf_nbuf_queue_next(cur)) {
		if (qdf_nbuf_get_users(cur) == 1)
			++num_nbuf;
	}

	qdf_atomic_sub(num_nbuf, &nbuf_count);
}
qdf_export_symbol(__qdf_nbuf_count_dec);
417*5113495bSYour Name #endif
418*5113495bSYour Name 
419*5113495bSYour Name #ifdef NBUF_FRAG_MEMORY_DEBUG
void qdf_nbuf_frag_count_inc(qdf_nbuf_t nbuf)
{
	qdf_nbuf_t cur;
	uint32_t nr_frags;
	uint32_t total;

	if (qdf_likely(is_initial_mem_debug_disabled))
		return;

	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
	qdf_assert_always(nr_frags <= QDF_NBUF_MAX_FRAGS);
	total = nr_frags;

	/* Include the frags of every nbuf chained via frag_list */
	for (cur = qdf_nbuf_get_ext_list(nbuf); cur;
	     cur = qdf_nbuf_queue_next(cur)) {
		nr_frags = qdf_nbuf_get_nr_frags(cur);
		qdf_assert_always(nr_frags <= QDF_NBUF_MAX_FRAGS);
		total += nr_frags;
	}

	qdf_frag_count_inc(total);
}

qdf_export_symbol(qdf_nbuf_frag_count_inc);
447*5113495bSYour Name 
void  qdf_nbuf_frag_count_dec(qdf_nbuf_t nbuf)
{
	qdf_nbuf_t cur;
	uint32_t nr_frags;
	uint32_t total;

	if (qdf_likely(is_initial_mem_debug_disabled))
		return;

	/* A buffer with other holders is not being released yet */
	if (qdf_nbuf_get_users(nbuf) > 1)
		return;

	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
	qdf_assert_always(nr_frags <= QDF_NBUF_MAX_FRAGS);
	total = nr_frags;

	/* Include frags of frag_list members that are also going away */
	for (cur = qdf_nbuf_get_ext_list(nbuf); cur;
	     cur = qdf_nbuf_queue_next(cur)) {
		if (qdf_nbuf_get_users(cur) == 1) {
			nr_frags = qdf_nbuf_get_nr_frags(cur);
			qdf_assert_always(nr_frags <= QDF_NBUF_MAX_FRAGS);
			total += nr_frags;
		}
	}

	qdf_frag_count_dec(total);
}

qdf_export_symbol(qdf_nbuf_frag_count_dec);
480*5113495bSYour Name 
481*5113495bSYour Name #endif
482*5113495bSYour Name 
483*5113495bSYour Name static inline void
qdf_nbuf_set_defaults(struct sk_buff * skb,int align,int reserve)484*5113495bSYour Name qdf_nbuf_set_defaults(struct sk_buff *skb, int align, int reserve)
485*5113495bSYour Name {
486*5113495bSYour Name 	unsigned long offset;
487*5113495bSYour Name 
488*5113495bSYour Name 	memset(skb->cb, 0x0, sizeof(skb->cb));
489*5113495bSYour Name 	skb->dev = NULL;
490*5113495bSYour Name 
491*5113495bSYour Name 	/*
492*5113495bSYour Name 	 * The default is for netbuf fragments to be interpreted
493*5113495bSYour Name 	 * as wordstreams rather than bytestreams.
494*5113495bSYour Name 	 */
495*5113495bSYour Name 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
496*5113495bSYour Name 	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
497*5113495bSYour Name 
498*5113495bSYour Name 	/*
499*5113495bSYour Name 	 * XXX:how about we reserve first then align
500*5113495bSYour Name 	 * Align & make sure that the tail & data are adjusted properly
501*5113495bSYour Name 	 */
502*5113495bSYour Name 
503*5113495bSYour Name 	if (align) {
504*5113495bSYour Name 		offset = ((unsigned long)skb->data) % align;
505*5113495bSYour Name 		if (offset)
506*5113495bSYour Name 			skb_reserve(skb, align - offset);
507*5113495bSYour Name 	}
508*5113495bSYour Name 
509*5113495bSYour Name 	/*
510*5113495bSYour Name 	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
511*5113495bSYour Name 	 * pointer
512*5113495bSYour Name 	 */
513*5113495bSYour Name 	skb_reserve(skb, reserve);
514*5113495bSYour Name 	qdf_nbuf_count_inc(skb);
515*5113495bSYour Name }
516*5113495bSYour Name 
517*5113495bSYour Name #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
518*5113495bSYour Name 	!defined(QCA_WIFI_QCN9000)
/* Emulation/X86 variant of __qdf_nbuf_alloc(): keeps re-allocating until
 * the buffer's physical address is at or above 0x50000040 (Hawkeye M2M
 * limitation); low-memory SKBs are intentionally leaked so the same
 * memory is not handed back by the allocator.
 */
struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
				 int align, int prio, const char *func,
				 uint32_t line)
{
	struct sk_buff *skb;
	uint32_t lowmem_alloc_tries = 0;

	/* Over-allocate so the data pointer can be aligned afterwards */
	if (align)
		size += (align - 1);

realloc:
	skb = dev_alloc_skb(size);

	if (skb)
		goto skb_alloc;

	/* Fall back to the platform pre-allocated nbuf pool */
	skb = pld_nbuf_pre_alloc(size);

	if (!skb) {
		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
				size, func, line);
		return NULL;
	}

skb_alloc:
	/* Hawkeye M2M emulation cannot handle memory addresses below 0x50000040
	 * Though we are trying to reserve low memory upfront to prevent this,
	 * we sometimes see SKBs allocated from low memory.
	 */
	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
		lowmem_alloc_tries++;
		if (lowmem_alloc_tries > 100) {
			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
				     size, func, line);
			return NULL;
		} else {
			/* Not freeing to make sure it
			 * will not get allocated again
			 */
			goto realloc;
		}
	}

	qdf_nbuf_set_defaults(skb, align, reserve);

	return skb;
}
566*5113495bSYour Name #else
567*5113495bSYour Name 
568*5113495bSYour Name #ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
/* With QCA_DP_NBUF_FAST_RECYCLE_CHECK, route all nbuf allocations through
 * the page-frag allocator path (__qdf_nbuf_frag_alloc).
 */
struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
				 int align, int prio, const char *func,
				 uint32_t line)
{
	return __qdf_nbuf_frag_alloc(osdev, size, reserve, align, prio, func,
				     line);
}
576*5113495bSYour Name 
577*5113495bSYour Name #else
/* Default nbuf allocator: try the slab allocator first, then fall back
 * to the platform pre-allocated pool; allocation failures arm the
 * replenish tracking timer.
 */
struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
				 int align, int prio, const char *func,
				 uint32_t line)
{
	struct sk_buff *skb;
	int flags = GFP_KERNEL;

	/* Over-allocate so the data pointer can be aligned afterwards */
	if (align)
		size += (align - 1);

	/* Atomic context forbids sleeping allocations */
	if (in_interrupt() || irqs_disabled() || in_atomic())
		flags = GFP_ATOMIC;

	skb = alloc_skb(size, flags);
	if (!skb) {
		/* Fall back to the platform pre-allocated nbuf pool */
		skb = pld_nbuf_pre_alloc(size);
		if (!skb) {
			qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
					size, func, line);
			__qdf_nbuf_start_replenish_timer();
			return NULL;
		}
		__qdf_nbuf_stop_replenish_timer();
	}

	qdf_nbuf_set_defaults(skb, align, reserve);

	return skb;
}
612*5113495bSYour Name #endif
613*5113495bSYour Name 
614*5113495bSYour Name #endif
615*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_alloc);
616*5113495bSYour Name 
/* Allocate an nbuf preferring the netdev page-frag allocator, falling
 * back to the slab allocator and finally the platform pre-allocated
 * pool. Reclaim flags are relaxed up-front and restored only for the
 * slab fallback in non-atomic context.
 */
struct sk_buff *__qdf_nbuf_frag_alloc(qdf_device_t osdev, size_t size,
				      int reserve, int align, int prio,
				      const char *func, uint32_t line)
{
	struct sk_buff *skb;
	int flags = GFP_KERNEL & ~__GFP_DIRECT_RECLAIM;
	bool atomic = false;

	/* Over-allocate so the data pointer can be aligned afterwards */
	if (align)
		size += (align - 1);

	if (in_interrupt() || irqs_disabled() || in_atomic()) {
		atomic = true;
		flags = GFP_ATOMIC;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
		/*
		 * Observed that kcompactd burns out CPU to make order-3 page.
		 *__netdev_alloc_skb has 4k page fallback option just in case of
		 * failing high order page allocation so we don't need to be
		 * hard. Let kcompactd rest in peace.
		 */
		flags = flags & ~__GFP_KSWAPD_RECLAIM;
#endif
	}

	skb = __netdev_alloc_skb(NULL, size, flags);
	if (skb)
		goto skb_alloc;

	/* 32k page frag alloc failed, try page slab allocation */
	if (likely(!atomic))
		flags |= __GFP_DIRECT_RECLAIM;

	skb = alloc_skb(size, flags);
	if (skb)
		goto skb_alloc;

	/* Last resort: the platform pre-allocated nbuf pool */
	skb = pld_nbuf_pre_alloc(size);

	if (!skb) {
		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
				size, func, line);
		__qdf_nbuf_start_replenish_timer();
		return NULL;
	}

	__qdf_nbuf_stop_replenish_timer();

skb_alloc:
	qdf_nbuf_set_defaults(skb, align, reserve);

	return skb;
}
670*5113495bSYour Name 
671*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_frag_alloc);
672*5113495bSYour Name 
__qdf_nbuf_alloc_no_recycler(size_t size,int reserve,int align,const char * func,uint32_t line)673*5113495bSYour Name __qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
674*5113495bSYour Name 					  const char *func, uint32_t line)
675*5113495bSYour Name {
676*5113495bSYour Name 	qdf_nbuf_t nbuf;
677*5113495bSYour Name 	unsigned long offset;
678*5113495bSYour Name 
679*5113495bSYour Name 	if (align)
680*5113495bSYour Name 		size += (align - 1);
681*5113495bSYour Name 
682*5113495bSYour Name 	nbuf = alloc_skb(size, GFP_ATOMIC);
683*5113495bSYour Name 	if (!nbuf)
684*5113495bSYour Name 		goto ret_nbuf;
685*5113495bSYour Name 
686*5113495bSYour Name 	memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
687*5113495bSYour Name 
688*5113495bSYour Name 	skb_reserve(nbuf, reserve);
689*5113495bSYour Name 
690*5113495bSYour Name 	if (align) {
691*5113495bSYour Name 		offset = ((unsigned long)nbuf->data) % align;
692*5113495bSYour Name 		if (offset)
693*5113495bSYour Name 			skb_reserve(nbuf, align - offset);
694*5113495bSYour Name 	}
695*5113495bSYour Name 
696*5113495bSYour Name 	qdf_nbuf_count_inc(nbuf);
697*5113495bSYour Name 
698*5113495bSYour Name ret_nbuf:
699*5113495bSYour Name 	return nbuf;
700*5113495bSYour Name }
701*5113495bSYour Name 
702*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_alloc_no_recycler);
703*5113495bSYour Name 
__qdf_nbuf_free(struct sk_buff * skb)704*5113495bSYour Name void __qdf_nbuf_free(struct sk_buff *skb)
705*5113495bSYour Name {
706*5113495bSYour Name 	if (pld_nbuf_pre_alloc_free(skb))
707*5113495bSYour Name 		return;
708*5113495bSYour Name 
709*5113495bSYour Name 	qdf_nbuf_frag_count_dec(skb);
710*5113495bSYour Name 
711*5113495bSYour Name 	qdf_nbuf_count_dec(skb);
712*5113495bSYour Name 	if (nbuf_free_cb)
713*5113495bSYour Name 		nbuf_free_cb(skb);
714*5113495bSYour Name 	else
715*5113495bSYour Name 		dev_kfree_skb_any(skb);
716*5113495bSYour Name }
717*5113495bSYour Name 
718*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_free);
719*5113495bSYour Name 
/**
 * __qdf_nbuf_clone() - clone an nbuf and update debug counters
 * @skb: buffer to clone
 *
 * Return: the clone (sharing data with @skb), or NULL on failure
 */
__qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t skb)
{
	qdf_nbuf_t cloned = skb_clone(skb, GFP_ATOMIC);

	if (!cloned)
		return NULL;

	/* account the clone in both debug counters */
	qdf_nbuf_frag_count_inc(cloned);
	qdf_nbuf_count_inc(cloned);

	return cloned;
}

qdf_export_symbol(__qdf_nbuf_clone);
733*5113495bSYour Name 
734*5113495bSYour Name struct sk_buff *
__qdf_nbuf_page_frag_alloc(qdf_device_t osdev,size_t size,int reserve,int align,__qdf_frag_cache_t * pf_cache,const char * func,uint32_t line)735*5113495bSYour Name __qdf_nbuf_page_frag_alloc(qdf_device_t osdev, size_t size, int reserve,
736*5113495bSYour Name 			   int align, __qdf_frag_cache_t *pf_cache,
737*5113495bSYour Name 			   const char *func, uint32_t line)
738*5113495bSYour Name {
739*5113495bSYour Name 	struct sk_buff *skb;
740*5113495bSYour Name 	qdf_frag_t frag_data;
741*5113495bSYour Name 	size_t orig_size = size;
742*5113495bSYour Name 	int flags = GFP_KERNEL;
743*5113495bSYour Name 
744*5113495bSYour Name 	if (align)
745*5113495bSYour Name 		size += (align - 1);
746*5113495bSYour Name 
747*5113495bSYour Name 	size += NET_SKB_PAD;
748*5113495bSYour Name 	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
749*5113495bSYour Name 	size = SKB_DATA_ALIGN(size);
750*5113495bSYour Name 
751*5113495bSYour Name 	if (in_interrupt() || irqs_disabled() || in_atomic())
752*5113495bSYour Name 		flags = GFP_ATOMIC;
753*5113495bSYour Name 
754*5113495bSYour Name 	frag_data = page_frag_alloc(pf_cache, size, flags);
755*5113495bSYour Name 	if (!frag_data) {
756*5113495bSYour Name 		qdf_rl_nofl_err("page frag alloc failed %zuB @ %s:%d",
757*5113495bSYour Name 				size, func, line);
758*5113495bSYour Name 		return __qdf_nbuf_alloc(osdev, orig_size, reserve, align, 0,
759*5113495bSYour Name 					func, line);
760*5113495bSYour Name 	}
761*5113495bSYour Name 
762*5113495bSYour Name 	skb = build_skb(frag_data, size);
763*5113495bSYour Name 	if (skb) {
764*5113495bSYour Name 		skb_reserve(skb, NET_SKB_PAD);
765*5113495bSYour Name 		goto skb_alloc;
766*5113495bSYour Name 	}
767*5113495bSYour Name 
768*5113495bSYour Name 	/* Free the data allocated from pf_cache */
769*5113495bSYour Name 	page_frag_free(frag_data);
770*5113495bSYour Name 
771*5113495bSYour Name 	size = orig_size + align - 1;
772*5113495bSYour Name 
773*5113495bSYour Name 	skb = pld_nbuf_pre_alloc(size);
774*5113495bSYour Name 	if (!skb) {
775*5113495bSYour Name 		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
776*5113495bSYour Name 				size, func, line);
777*5113495bSYour Name 		__qdf_nbuf_start_replenish_timer();
778*5113495bSYour Name 		return NULL;
779*5113495bSYour Name 	}
780*5113495bSYour Name 
781*5113495bSYour Name 	__qdf_nbuf_stop_replenish_timer();
782*5113495bSYour Name 
783*5113495bSYour Name skb_alloc:
784*5113495bSYour Name 	qdf_nbuf_set_defaults(skb, align, reserve);
785*5113495bSYour Name 
786*5113495bSYour Name 	return skb;
787*5113495bSYour Name }
788*5113495bSYour Name 
789*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_page_frag_alloc);
790*5113495bSYour Name 
#ifdef QCA_DP_TX_NBUF_LIST_FREE
/**
 * __qdf_nbuf_dev_kfree_list() - free all skbs queued on @nbuf_queue_head
 * @nbuf_queue_head: queue head whose skb list is freed via the fast path
 */
void
__qdf_nbuf_dev_kfree_list(__qdf_nbuf_queue_head_t *nbuf_queue_head)
{
	dev_kfree_skb_list_fast(nbuf_queue_head);
}
#else
/* Stub: list fast-free is compiled out when QCA_DP_TX_NBUF_LIST_FREE is
 * not defined; the queued buffers are not freed here.
 */
void
__qdf_nbuf_dev_kfree_list(__qdf_nbuf_queue_head_t *nbuf_queue_head)
{
}
#endif

qdf_export_symbol(__qdf_nbuf_dev_kfree_list);
805*5113495bSYour Name 
806*5113495bSYour Name #ifdef NBUF_MEMORY_DEBUG
/**
 * struct qdf_nbuf_event - one entry in the nbuf lifecycle history ring
 * @nbuf: buffer the event refers to
 * @func: name of the function that recorded the event
 * @line: line number that recorded the event
 * @type: kind of event (see enum qdf_nbuf_event_type)
 * @timestamp: log timestamp taken when the event was recorded
 * @iova: DMA address for map/unmap events, 0 for all other types
 */
struct qdf_nbuf_event {
	qdf_nbuf_t nbuf;
	char func[QDF_MEM_FUNC_NAME_SIZE];
	uint32_t line;
	enum qdf_nbuf_event_type type;
	uint64_t timestamp;
	qdf_dma_addr_t iova;
};

#ifndef QDF_NBUF_HISTORY_SIZE
#define QDF_NBUF_HISTORY_SIZE 4096
#endif
/* Circular event history; the index wraps at QDF_NBUF_HISTORY_SIZE via
 * qdf_nbuf_circular_index_next().
 */
static qdf_atomic_t qdf_nbuf_history_index;
static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];
821*5113495bSYour Name 
/* Register the nbuf history ring as an SSR driver-dump region so it is
 * captured when a subsystem-restart dump is collected.
 */
void qdf_nbuf_ssr_register_region(void)
{
	qdf_ssr_driver_dump_register_region("qdf_nbuf_history",
					    qdf_nbuf_history,
					    sizeof(qdf_nbuf_history));
}

qdf_export_symbol(qdf_nbuf_ssr_register_region);
830*5113495bSYour Name 
/* Remove the nbuf history ring from the SSR driver-dump region list. */
void qdf_nbuf_ssr_unregister_region(void)
{
	qdf_ssr_driver_dump_unregister_region("qdf_nbuf_history");
}

qdf_export_symbol(qdf_nbuf_ssr_unregister_region);
837*5113495bSYour Name 
/**
 * qdf_nbuf_circular_index_next() - atomically claim the next ring slot
 * @index: atomic counter backing the ring index
 * @size: number of slots in the ring
 *
 * Lock-free: only the caller that observes the incremented value equal to
 * @size rebases the counter, so concurrent callers may briefly read values
 * >= @size; the final "% size" keeps every returned slot in range anyway.
 *
 * Return: slot index in [0, @size) for the caller to use
 */
static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
{
	int32_t next = qdf_atomic_inc_return(index);

	if (next == size)
		qdf_atomic_sub(size, index);

	return next % size;
}
847*5113495bSYour Name 
/**
 * qdf_nbuf_history_add() - append one event to the nbuf history ring
 * @nbuf: buffer the event refers to
 * @func: caller function name
 * @line: caller line number
 * @type: event type to record
 *
 * While the smmu_crashed flag is set (see qdf_set_smmu_fault_state())
 * events are dropped and only counted in g_histroy_add_drop.
 */
void
qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
		     enum qdf_nbuf_event_type type)
{
	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
						   QDF_NBUF_HISTORY_SIZE);
	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];

	if (qdf_atomic_read(&smmu_crashed)) {
		g_histroy_add_drop++;
		return;
	}

	event->nbuf = nbuf;
	qdf_str_lcopy(event->func, func, QDF_MEM_FUNC_NAME_SIZE);
	event->line = line;
	event->type = type;
	event->timestamp = qdf_get_log_timestamp();
	/* only map/unmap events carry a DMA address */
	if (type == QDF_NBUF_MAP || type == QDF_NBUF_UNMAP ||
	    type == QDF_NBUF_SMMU_MAP || type == QDF_NBUF_SMMU_UNMAP)
		event->iova = QDF_NBUF_CB_PADDR(nbuf);
	else
		event->iova = 0;
}
872*5113495bSYour Name 
/* Set or clear the SMMU fault flag consulted by qdf_nbuf_history_add();
 * clearing the flag also resets the dropped-event counter.
 */
void qdf_set_smmu_fault_state(bool smmu_fault_state)
{
	qdf_atomic_set(&smmu_crashed, smmu_fault_state);
	if (!smmu_fault_state)
		g_histroy_add_drop = 0;
}
qdf_export_symbol(qdf_set_smmu_fault_state);
880*5113495bSYour Name #endif /* NBUF_MEMORY_DEBUG */
881*5113495bSYour Name 
882*5113495bSYour Name #ifdef NBUF_SMMU_MAP_UNMAP_DEBUG
#define qdf_nbuf_smmu_map_tracker_bits 11 /* 2048 buckets */
/* Tracker recording nbufs with an outstanding SMMU mapping */
qdf_tracker_declare(qdf_nbuf_smmu_map_tracker, qdf_nbuf_smmu_map_tracker_bits,
		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");
886*5113495bSYour Name 
/* Initialize the SMMU map/unmap leak tracker */
static void qdf_nbuf_smmu_map_tracking_init(void)
{
	qdf_tracker_init(&qdf_nbuf_smmu_map_tracker);
}
891*5113495bSYour Name 
/* Tear down the SMMU map/unmap leak tracker */
static void qdf_nbuf_smmu_map_tracking_deinit(void)
{
	qdf_tracker_deinit(&qdf_nbuf_smmu_map_tracker);
}
896*5113495bSYour Name 
/* Record an SMMU mapping of @nbuf in the tracker; returns success without
 * tracking when memory debug is disabled at runtime.
 */
static QDF_STATUS
qdf_nbuf_track_smmu_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
{
	if (is_initial_mem_debug_disabled)
		return QDF_STATUS_SUCCESS;

	return qdf_tracker_track(&qdf_nbuf_smmu_map_tracker, nbuf, func, line);
}
905*5113495bSYour Name 
/* Drop @nbuf from the SMMU tracker and log a QDF_NBUF_SMMU_UNMAP history
 * event; no-op when memory debug is disabled at runtime.
 */
static void
qdf_nbuf_untrack_smmu_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
{
	if (is_initial_mem_debug_disabled)
		return;

	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_SMMU_UNMAP);
	qdf_tracker_untrack(&qdf_nbuf_smmu_map_tracker, nbuf, func, line);
}
915*5113495bSYour Name 
/* Report nbufs that were SMMU-mapped but never unmapped */
void qdf_nbuf_map_check_for_smmu_leaks(void)
{
	qdf_tracker_check_for_leaks(&qdf_nbuf_smmu_map_tracker);
}
920*5113495bSYour Name 
921*5113495bSYour Name #ifdef IPA_OFFLOAD
qdf_nbuf_smmu_map_debug(qdf_nbuf_t nbuf,uint8_t hdl,uint8_t num_buffers,qdf_mem_info_t * info,const char * func,uint32_t line)922*5113495bSYour Name QDF_STATUS qdf_nbuf_smmu_map_debug(qdf_nbuf_t nbuf,
923*5113495bSYour Name 				   uint8_t hdl,
924*5113495bSYour Name 				   uint8_t num_buffers,
925*5113495bSYour Name 				   qdf_mem_info_t *info,
926*5113495bSYour Name 				   const char *func,
927*5113495bSYour Name 				   uint32_t line)
928*5113495bSYour Name {
929*5113495bSYour Name 	QDF_STATUS status;
930*5113495bSYour Name 
931*5113495bSYour Name 	status = qdf_nbuf_track_smmu_map(nbuf, func, line);
932*5113495bSYour Name 	if (QDF_IS_STATUS_ERROR(status))
933*5113495bSYour Name 		return status;
934*5113495bSYour Name 
935*5113495bSYour Name 	status = __qdf_ipa_wdi_create_smmu_mapping(hdl, num_buffers, info);
936*5113495bSYour Name 
937*5113495bSYour Name 	if (QDF_IS_STATUS_ERROR(status)) {
938*5113495bSYour Name 		qdf_nbuf_untrack_smmu_map(nbuf, func, line);
939*5113495bSYour Name 	} else {
940*5113495bSYour Name 		if (!is_initial_mem_debug_disabled)
941*5113495bSYour Name 			qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_MAP);
942*5113495bSYour Name 		qdf_net_buf_debug_update_smmu_map_node(nbuf, info->iova,
943*5113495bSYour Name 						       info->pa, func, line);
944*5113495bSYour Name 	}
945*5113495bSYour Name 
946*5113495bSYour Name 	return status;
947*5113495bSYour Name }
948*5113495bSYour Name 
949*5113495bSYour Name qdf_export_symbol(qdf_nbuf_smmu_map_debug);
950*5113495bSYour Name 
/**
 * qdf_nbuf_smmu_unmap_debug() - release an IPA WDI SMMU mapping with leak
 *	tracking
 * @nbuf: network buffer being unmapped
 * @hdl: IPA handle
 * @num_buffers: number of buffers described by @info
 * @info: SMMU mapping descriptor(s)
 * @func: caller function name
 * @line: caller line number
 *
 * The tracker entry is removed and the debug node updated regardless of
 * whether the IPA release call succeeds; only the IPA status is returned.
 *
 * Return: status of __qdf_ipa_wdi_release_smmu_mapping()
 */
QDF_STATUS qdf_nbuf_smmu_unmap_debug(qdf_nbuf_t nbuf,
				     uint8_t hdl,
				     uint8_t num_buffers,
				     qdf_mem_info_t *info,
				     const char *func,
				     uint32_t line)
{
	QDF_STATUS status;

	qdf_nbuf_untrack_smmu_map(nbuf, func, line);
	status = __qdf_ipa_wdi_release_smmu_mapping(hdl, num_buffers, info);
	qdf_net_buf_debug_update_smmu_unmap_node(nbuf, info->iova,
						 info->pa, func, line);
	return status;
}

qdf_export_symbol(qdf_nbuf_smmu_unmap_debug);
968*5113495bSYour Name #endif /* IPA_OFFLOAD */
969*5113495bSYour Name 
/* Panic if @nbuf is being freed while an SMMU mapping is still
 * outstanding, reporting where the mapping was created.
 */
static void qdf_nbuf_panic_on_free_if_smmu_mapped(qdf_nbuf_t nbuf,
						  const char *func,
						  uint32_t line)
{
	char map_func[QDF_TRACKER_FUNC_SIZE];
	uint32_t map_line;

	/* not in the tracker: no outstanding SMMU mapping, nothing to do */
	if (!qdf_tracker_lookup(&qdf_nbuf_smmu_map_tracker, nbuf,
				&map_func, &map_line))
		return;

	QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
			   func, line, map_func, map_line);
}
984*5113495bSYour Name 
/* Reset every SMMU bookkeeping field of a tracking node to its
 * "never mapped" state.
 */
static inline void qdf_net_buf_update_smmu_params(QDF_NBUF_TRACK *p_node)
{
	p_node->smmu_unmap_line_num = 0;
	p_node->is_nbuf_smmu_mapped = false;
	p_node->smmu_map_line_num = 0;
	p_node->smmu_map_func_name[0] = '\0';
	p_node->smmu_unmap_func_name[0] = '\0';
	p_node->smmu_unmap_iova_addr = 0;
	p_node->smmu_unmap_pa_addr = 0;
	p_node->smmu_map_iova_addr = 0;
	p_node->smmu_map_pa_addr = 0;
}
997*5113495bSYour Name #else /* !NBUF_SMMU_MAP_UNMAP_DEBUG */
998*5113495bSYour Name #ifdef NBUF_MEMORY_DEBUG
/* Stub: SMMU map tracking compiled out (NBUF_SMMU_MAP_UNMAP_DEBUG unset) */
static void qdf_nbuf_smmu_map_tracking_init(void)
{
}
1002*5113495bSYour Name 
/* Stub: SMMU map tracking compiled out (NBUF_SMMU_MAP_UNMAP_DEBUG unset) */
static void qdf_nbuf_smmu_map_tracking_deinit(void)
{
}
1006*5113495bSYour Name 
/* Stub: no SMMU-mapped check on free when the tracker is compiled out */
static void qdf_nbuf_panic_on_free_if_smmu_mapped(qdf_nbuf_t nbuf,
						  const char *func,
						  uint32_t line)
{
}
1012*5113495bSYour Name 
/* Stub: no SMMU fields to reset when the tracker is compiled out */
static inline void qdf_net_buf_update_smmu_params(QDF_NBUF_TRACK *p_node)
{
}
1016*5113495bSYour Name #endif /* NBUF_MEMORY_DEBUG */
1017*5113495bSYour Name 
1018*5113495bSYour Name #ifdef IPA_OFFLOAD
/* Without NBUF_SMMU_MAP_UNMAP_DEBUG, pass straight through to IPA with
 * no tracking; @nbuf, @func and @line are unused here.
 */
QDF_STATUS qdf_nbuf_smmu_map_debug(qdf_nbuf_t nbuf,
				   uint8_t hdl,
				   uint8_t num_buffers,
				   qdf_mem_info_t *info,
				   const char *func,
				   uint32_t line)
{
	return  __qdf_ipa_wdi_create_smmu_mapping(hdl, num_buffers, info);
}

qdf_export_symbol(qdf_nbuf_smmu_map_debug);
1030*5113495bSYour Name 
/* Without NBUF_SMMU_MAP_UNMAP_DEBUG, pass straight through to IPA with
 * no tracking; @nbuf, @func and @line are unused here.
 */
QDF_STATUS qdf_nbuf_smmu_unmap_debug(qdf_nbuf_t nbuf,
				     uint8_t hdl,
				     uint8_t num_buffers,
				     qdf_mem_info_t *info,
				     const char *func,
				     uint32_t line)
{
	return __qdf_ipa_wdi_release_smmu_mapping(hdl, num_buffers, info);
}

qdf_export_symbol(qdf_nbuf_smmu_unmap_debug);
1042*5113495bSYour Name #endif /* IPA_OFFLOAD */
1043*5113495bSYour Name #endif /* NBUF_SMMU_MAP_UNMAP_DEBUG */
1044*5113495bSYour Name 
1045*5113495bSYour Name #ifdef NBUF_MAP_UNMAP_DEBUG
#define qdf_nbuf_map_tracker_bits 11 /* 2048 buckets */
/* Tracker recording nbufs with an outstanding DMA mapping */
qdf_tracker_declare(qdf_nbuf_map_tracker, qdf_nbuf_map_tracker_bits,
		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");
1049*5113495bSYour Name 
/* Initialize the DMA map/unmap leak tracker */
static void qdf_nbuf_map_tracking_init(void)
{
	qdf_tracker_init(&qdf_nbuf_map_tracker);
}
1054*5113495bSYour Name 
/* Tear down the DMA map/unmap leak tracker */
static void qdf_nbuf_map_tracking_deinit(void)
{
	qdf_tracker_deinit(&qdf_nbuf_map_tracker);
}
1059*5113495bSYour Name 
/* Record a DMA mapping of @nbuf in the tracker; returns success without
 * tracking when memory debug is disabled at runtime.
 */
static QDF_STATUS
qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
{
	if (is_initial_mem_debug_disabled)
		return QDF_STATUS_SUCCESS;

	return qdf_tracker_track(&qdf_nbuf_map_tracker, nbuf, func, line);
}
1068*5113495bSYour Name 
/* Drop @nbuf from the DMA map tracker and log a QDF_NBUF_UNMAP history
 * event; no-op when memory debug is disabled at runtime.
 */
static void
qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
{
	if (is_initial_mem_debug_disabled)
		return;

	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_UNMAP);
	qdf_tracker_untrack(&qdf_nbuf_map_tracker, nbuf, func, line);
}
1078*5113495bSYour Name 
/* Report nbufs that were DMA-mapped but never unmapped */
void qdf_nbuf_map_check_for_leaks(void)
{
	qdf_tracker_check_for_leaks(&qdf_nbuf_map_tracker);
}
1083*5113495bSYour Name 
qdf_nbuf_map_debug(qdf_device_t osdev,qdf_nbuf_t buf,qdf_dma_dir_t dir,const char * func,uint32_t line)1084*5113495bSYour Name QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
1085*5113495bSYour Name 			      qdf_nbuf_t buf,
1086*5113495bSYour Name 			      qdf_dma_dir_t dir,
1087*5113495bSYour Name 			      const char *func,
1088*5113495bSYour Name 			      uint32_t line)
1089*5113495bSYour Name {
1090*5113495bSYour Name 	QDF_STATUS status;
1091*5113495bSYour Name 
1092*5113495bSYour Name 	status = qdf_nbuf_track_map(buf, func, line);
1093*5113495bSYour Name 	if (QDF_IS_STATUS_ERROR(status))
1094*5113495bSYour Name 		return status;
1095*5113495bSYour Name 
1096*5113495bSYour Name 	status = __qdf_nbuf_map(osdev, buf, dir);
1097*5113495bSYour Name 	if (QDF_IS_STATUS_ERROR(status)) {
1098*5113495bSYour Name 		qdf_nbuf_untrack_map(buf, func, line);
1099*5113495bSYour Name 	} else {
1100*5113495bSYour Name 		if (!is_initial_mem_debug_disabled)
1101*5113495bSYour Name 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
1102*5113495bSYour Name 		qdf_net_buf_debug_update_map_node(buf, func, line);
1103*5113495bSYour Name 	}
1104*5113495bSYour Name 
1105*5113495bSYour Name 	return status;
1106*5113495bSYour Name }
1107*5113495bSYour Name 
1108*5113495bSYour Name qdf_export_symbol(qdf_nbuf_map_debug);
1109*5113495bSYour Name 
/* Untrack, DMA-unmap, then update the debug node.
 * NOTE(review): this pairs with qdf_nbuf_map_debug() yet calls
 * __qdf_nbuf_unmap_single() — presumably the generic unmap resolves to the
 * single-buffer variant on this platform; confirm against the qdf_nbuf
 * headers.
 */
void qdf_nbuf_unmap_debug(qdf_device_t osdev,
			  qdf_nbuf_t buf,
			  qdf_dma_dir_t dir,
			  const char *func,
			  uint32_t line)
{
	qdf_nbuf_untrack_map(buf, func, line);
	__qdf_nbuf_unmap_single(osdev, buf, dir);
	qdf_net_buf_debug_update_unmap_node(buf, func, line);
}

qdf_export_symbol(qdf_nbuf_unmap_debug);
1122*5113495bSYour Name 
qdf_nbuf_map_single_debug(qdf_device_t osdev,qdf_nbuf_t buf,qdf_dma_dir_t dir,const char * func,uint32_t line)1123*5113495bSYour Name QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
1124*5113495bSYour Name 				     qdf_nbuf_t buf,
1125*5113495bSYour Name 				     qdf_dma_dir_t dir,
1126*5113495bSYour Name 				     const char *func,
1127*5113495bSYour Name 				     uint32_t line)
1128*5113495bSYour Name {
1129*5113495bSYour Name 	QDF_STATUS status;
1130*5113495bSYour Name 
1131*5113495bSYour Name 	status = qdf_nbuf_track_map(buf, func, line);
1132*5113495bSYour Name 	if (QDF_IS_STATUS_ERROR(status))
1133*5113495bSYour Name 		return status;
1134*5113495bSYour Name 
1135*5113495bSYour Name 	status = __qdf_nbuf_map_single(osdev, buf, dir);
1136*5113495bSYour Name 	if (QDF_IS_STATUS_ERROR(status)) {
1137*5113495bSYour Name 		qdf_nbuf_untrack_map(buf, func, line);
1138*5113495bSYour Name 	} else {
1139*5113495bSYour Name 		if (!is_initial_mem_debug_disabled)
1140*5113495bSYour Name 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
1141*5113495bSYour Name 		qdf_net_buf_debug_update_map_node(buf, func, line);
1142*5113495bSYour Name 	}
1143*5113495bSYour Name 
1144*5113495bSYour Name 	return status;
1145*5113495bSYour Name }
1146*5113495bSYour Name 
1147*5113495bSYour Name qdf_export_symbol(qdf_nbuf_map_single_debug);
1148*5113495bSYour Name 
/* Untrack, DMA-unmap (single buffer), then update the debug node */
void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
				 qdf_nbuf_t buf,
				 qdf_dma_dir_t dir,
				 const char *func,
				 uint32_t line)
{
	qdf_nbuf_untrack_map(buf, func, line);
	__qdf_nbuf_unmap_single(osdev, buf, dir);
	qdf_net_buf_debug_update_unmap_node(buf, func, line);
}

qdf_export_symbol(qdf_nbuf_unmap_single_debug);
1161*5113495bSYour Name 
qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,qdf_nbuf_t buf,qdf_dma_dir_t dir,int nbytes,const char * func,uint32_t line)1162*5113495bSYour Name QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
1163*5113495bSYour Name 				     qdf_nbuf_t buf,
1164*5113495bSYour Name 				     qdf_dma_dir_t dir,
1165*5113495bSYour Name 				     int nbytes,
1166*5113495bSYour Name 				     const char *func,
1167*5113495bSYour Name 				     uint32_t line)
1168*5113495bSYour Name {
1169*5113495bSYour Name 	QDF_STATUS status;
1170*5113495bSYour Name 
1171*5113495bSYour Name 	status = qdf_nbuf_track_map(buf, func, line);
1172*5113495bSYour Name 	if (QDF_IS_STATUS_ERROR(status))
1173*5113495bSYour Name 		return status;
1174*5113495bSYour Name 
1175*5113495bSYour Name 	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
1176*5113495bSYour Name 	if (QDF_IS_STATUS_ERROR(status)) {
1177*5113495bSYour Name 		qdf_nbuf_untrack_map(buf, func, line);
1178*5113495bSYour Name 	} else {
1179*5113495bSYour Name 		if (!is_initial_mem_debug_disabled)
1180*5113495bSYour Name 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
1181*5113495bSYour Name 		qdf_net_buf_debug_update_map_node(buf, func, line);
1182*5113495bSYour Name 	}
1183*5113495bSYour Name 
1184*5113495bSYour Name 	return status;
1185*5113495bSYour Name }
1186*5113495bSYour Name 
1187*5113495bSYour Name qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
1188*5113495bSYour Name 
/* Untrack, DMA-unmap @nbytes of the buffer, then update the debug node */
void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
				 qdf_nbuf_t buf,
				 qdf_dma_dir_t dir,
				 int nbytes,
				 const char *func,
				 uint32_t line)
{
	qdf_nbuf_untrack_map(buf, func, line);
	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
	qdf_net_buf_debug_update_unmap_node(buf, func, line);
}

qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
1202*5113495bSYour Name 
qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,qdf_nbuf_t buf,qdf_dma_dir_t dir,int nbytes,const char * func,uint32_t line)1203*5113495bSYour Name QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
1204*5113495bSYour Name 					    qdf_nbuf_t buf,
1205*5113495bSYour Name 					    qdf_dma_dir_t dir,
1206*5113495bSYour Name 					    int nbytes,
1207*5113495bSYour Name 					    const char *func,
1208*5113495bSYour Name 					    uint32_t line)
1209*5113495bSYour Name {
1210*5113495bSYour Name 	QDF_STATUS status;
1211*5113495bSYour Name 
1212*5113495bSYour Name 	status = qdf_nbuf_track_map(buf, func, line);
1213*5113495bSYour Name 	if (QDF_IS_STATUS_ERROR(status))
1214*5113495bSYour Name 		return status;
1215*5113495bSYour Name 
1216*5113495bSYour Name 	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
1217*5113495bSYour Name 	if (QDF_IS_STATUS_ERROR(status)) {
1218*5113495bSYour Name 		qdf_nbuf_untrack_map(buf, func, line);
1219*5113495bSYour Name 	} else {
1220*5113495bSYour Name 		if (!is_initial_mem_debug_disabled)
1221*5113495bSYour Name 			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
1222*5113495bSYour Name 		qdf_net_buf_debug_update_map_node(buf, func, line);
1223*5113495bSYour Name 	}
1224*5113495bSYour Name 
1225*5113495bSYour Name 	return status;
1226*5113495bSYour Name }
1227*5113495bSYour Name 
1228*5113495bSYour Name qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
1229*5113495bSYour Name 
/* Untrack, DMA-unmap @nbytes (single buffer), then update the debug node */
void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
					qdf_nbuf_t buf,
					qdf_dma_dir_t dir,
					int nbytes,
					const char *func,
					uint32_t line)
{
	qdf_nbuf_untrack_map(buf, func, line);
	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
	qdf_net_buf_debug_update_unmap_node(buf, func, line);
}

qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
1243*5113495bSYour Name 
/* Debug unmap variant for callers that kept the DMA address (@phy_addr)
 * themselves rather than in the nbuf control block.
 */
void qdf_nbuf_unmap_nbytes_single_paddr_debug(qdf_device_t osdev,
					      qdf_nbuf_t buf,
					      qdf_dma_addr_t phy_addr,
					      qdf_dma_dir_t dir, int nbytes,
					      const char *func, uint32_t line)
{
	qdf_nbuf_untrack_map(buf, func, line);
	/* decrement the mapped-bytes accounting for this nbuf */
	__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf), dir, false);
	__qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
	qdf_net_buf_debug_update_unmap_node(buf, func, line);
}

qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_paddr_debug);
1257*5113495bSYour Name 
/**
 * qdf_nbuf_panic_on_free_if_mapped() - assert an nbuf is unmapped at free time
 * @nbuf: the network buffer being freed
 * @func: name of the freeing caller
 * @line: line number of the freeing caller
 *
 * Panics with both the free site and the original map site if @nbuf still
 * has a live entry in the map tracker.
 */
static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
					     const char *func,
					     uint32_t line)
{
	char map_func[QDF_TRACKER_FUNC_SIZE];
	uint32_t map_line;

	if (qdf_tracker_lookup(&qdf_nbuf_map_tracker, nbuf,
			       &map_func, &map_line))
		QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
				   func, line, map_func, map_line);
}
1272*5113495bSYour Name #else
static inline void qdf_nbuf_map_tracking_init(void)
{
	/* no-op when NBUF_MAP_UNMAP_DEBUG is disabled */
}
1276*5113495bSYour Name 
static inline void qdf_nbuf_map_tracking_deinit(void)
{
	/* no-op when NBUF_MAP_UNMAP_DEBUG is disabled */
}
1280*5113495bSYour Name 
/* no-op stub: the mapped-at-free check only exists in debug builds */
static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
						    const char *func,
						    uint32_t line)
{
}
1286*5113495bSYour Name #endif /* NBUF_MAP_UNMAP_DEBUG */
1287*5113495bSYour Name 
1288*5113495bSYour Name #ifdef QDF_OS_DEBUG
1289*5113495bSYour Name QDF_STATUS
__qdf_nbuf_map(qdf_device_t osdev,struct sk_buff * skb,qdf_dma_dir_t dir)1290*5113495bSYour Name __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
1291*5113495bSYour Name {
1292*5113495bSYour Name 	struct skb_shared_info *sh = skb_shinfo(skb);
1293*5113495bSYour Name 
1294*5113495bSYour Name 	qdf_assert((dir == QDF_DMA_TO_DEVICE)
1295*5113495bSYour Name 			|| (dir == QDF_DMA_FROM_DEVICE));
1296*5113495bSYour Name 
1297*5113495bSYour Name 	/*
1298*5113495bSYour Name 	 * Assume there's only a single fragment.
1299*5113495bSYour Name 	 * To support multiple fragments, it would be necessary to change
1300*5113495bSYour Name 	 * qdf_nbuf_t to be a separate object that stores meta-info
1301*5113495bSYour Name 	 * (including the bus address for each fragment) and a pointer
1302*5113495bSYour Name 	 * to the underlying sk_buff.
1303*5113495bSYour Name 	 */
1304*5113495bSYour Name 	qdf_assert(sh->nr_frags == 0);
1305*5113495bSYour Name 
1306*5113495bSYour Name 	return __qdf_nbuf_map_single(osdev, skb, dir);
1307*5113495bSYour Name }
1308*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_map);
1309*5113495bSYour Name 
1310*5113495bSYour Name #else
QDF_STATUS
__qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
{
	/* non-debug build: skip the direction/fragment assertions and map */
	return __qdf_nbuf_map_single(osdev, skb, dir);
}
qdf_export_symbol(__qdf_nbuf_map);
1317*5113495bSYour Name #endif
1318*5113495bSYour Name 
/* Unmap an skb previously mapped via __qdf_nbuf_map() */
void
__qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
			qdf_dma_dir_t dir)
{
	/* same direction restriction as the map path */
	qdf_assert((dir == QDF_DMA_TO_DEVICE)
		   || (dir == QDF_DMA_FROM_DEVICE));

	/*
	 * Assume there's a single fragment.
	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
	 */
	__qdf_nbuf_unmap_single(osdev, skb, dir);
}
qdf_export_symbol(__qdf_nbuf_unmap);
1333*5113495bSYour Name 
1334*5113495bSYour Name #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
QDF_STATUS
__qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
{
	qdf_dma_addr_t paddr;

	/* No real DMA mapping on these targets (simulation/USB/SDIO): the
	 * CPU address of the payload doubles as the "DMA" address.
	 */
	QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
	/* compile-time guarantee that a pointer fits in the paddr fields */
	BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
	BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(__qdf_nbuf_map_single);
1346*5113495bSYour Name #else
1347*5113495bSYour Name QDF_STATUS
__qdf_nbuf_map_single(qdf_device_t osdev,qdf_nbuf_t buf,qdf_dma_dir_t dir)1348*5113495bSYour Name __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
1349*5113495bSYour Name {
1350*5113495bSYour Name 	qdf_dma_addr_t paddr;
1351*5113495bSYour Name 
1352*5113495bSYour Name 	/* assume that the OS only provides a single fragment */
1353*5113495bSYour Name 	QDF_NBUF_CB_PADDR(buf) = paddr =
1354*5113495bSYour Name 		dma_map_single(osdev->dev, buf->data,
1355*5113495bSYour Name 				skb_end_pointer(buf) - buf->data,
1356*5113495bSYour Name 				__qdf_dma_dir_to_os(dir));
1357*5113495bSYour Name 	__qdf_record_nbuf_nbytes(
1358*5113495bSYour Name 		__qdf_nbuf_get_end_offset(buf), dir, true);
1359*5113495bSYour Name 	return dma_mapping_error(osdev->dev, paddr)
1360*5113495bSYour Name 		? QDF_STATUS_E_FAILURE
1361*5113495bSYour Name 		: QDF_STATUS_SUCCESS;
1362*5113495bSYour Name }
1363*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_map_single);
1364*5113495bSYour Name #endif
1365*5113495bSYour Name 
1366*5113495bSYour Name #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
/* no-op: these targets never create a real DMA mapping (see map_single) */
void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
				qdf_dma_dir_t dir)
{
}
1371*5113495bSYour Name #else
/* Undo __qdf_nbuf_map_single(): update byte accounting and unmap the DMA */
void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
					qdf_dma_dir_t dir)
{
	/* a zero paddr means the nbuf was never (successfully) mapped */
	if (QDF_NBUF_CB_PADDR(buf)) {
		__qdf_record_nbuf_nbytes(
			__qdf_nbuf_get_end_offset(buf), dir, false);
		dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
			skb_end_pointer(buf) - buf->data,
			__qdf_dma_dir_to_os(dir));
	}
}
1383*5113495bSYour Name #endif
1384*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_unmap_single);
1385*5113495bSYour Name 
1386*5113495bSYour Name QDF_STATUS
__qdf_nbuf_set_rx_cksum(struct sk_buff * skb,qdf_nbuf_rx_cksum_t * cksum)1387*5113495bSYour Name __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
1388*5113495bSYour Name {
1389*5113495bSYour Name 	switch (cksum->l4_result) {
1390*5113495bSYour Name 	case QDF_NBUF_RX_CKSUM_NONE:
1391*5113495bSYour Name 		skb->ip_summed = CHECKSUM_NONE;
1392*5113495bSYour Name 		break;
1393*5113495bSYour Name 	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
1394*5113495bSYour Name 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1395*5113495bSYour Name 		skb->csum_level = cksum->csum_level;
1396*5113495bSYour Name 		break;
1397*5113495bSYour Name 	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
1398*5113495bSYour Name 		skb->ip_summed = CHECKSUM_PARTIAL;
1399*5113495bSYour Name 		skb->csum = cksum->val;
1400*5113495bSYour Name 		break;
1401*5113495bSYour Name 	default:
1402*5113495bSYour Name 		pr_err("Unknown checksum type\n");
1403*5113495bSYour Name 		qdf_assert(0);
1404*5113495bSYour Name 		return QDF_STATUS_E_NOSUPPORT;
1405*5113495bSYour Name 	}
1406*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1407*5113495bSYour Name }
1408*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
1409*5113495bSYour Name 
__qdf_nbuf_get_tx_cksum(struct sk_buff * skb)1410*5113495bSYour Name qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
1411*5113495bSYour Name {
1412*5113495bSYour Name 	switch (skb->ip_summed) {
1413*5113495bSYour Name 	case CHECKSUM_NONE:
1414*5113495bSYour Name 		return QDF_NBUF_TX_CKSUM_NONE;
1415*5113495bSYour Name 	case CHECKSUM_PARTIAL:
1416*5113495bSYour Name 		return QDF_NBUF_TX_CKSUM_TCP_UDP;
1417*5113495bSYour Name 	case CHECKSUM_COMPLETE:
1418*5113495bSYour Name 		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
1419*5113495bSYour Name 	default:
1420*5113495bSYour Name 		return QDF_NBUF_TX_CKSUM_NONE;
1421*5113495bSYour Name 	}
1422*5113495bSYour Name }
1423*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
1424*5113495bSYour Name 
/* The TID is stored in the skb priority field (see __qdf_nbuf_set_tid) */
uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
{
	return skb->priority;
}
qdf_export_symbol(__qdf_nbuf_get_tid);
1430*5113495bSYour Name 
/* Store the TID in the skb priority field (read back by __qdf_nbuf_get_tid) */
void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
{
	skb->priority = tid;
}
qdf_export_symbol(__qdf_nbuf_set_tid);
1436*5113495bSYour Name 
/* @skb is unused: this platform always reports "no security exemption" */
uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
{
	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
}
qdf_export_symbol(__qdf_nbuf_get_exemption_type);
1442*5113495bSYour Name 
/* Register the caller's trace callback in the module-level hook */
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
{
	qdf_trace_update_cb = cb_func_ptr;
}
qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
1448*5113495bSYour Name 
1449*5113495bSYour Name enum qdf_proto_subtype
__qdf_nbuf_data_get_dhcp_subtype(uint8_t * data)1450*5113495bSYour Name __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
1451*5113495bSYour Name {
1452*5113495bSYour Name 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
1453*5113495bSYour Name 
1454*5113495bSYour Name 	if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
1455*5113495bSYour Name 		(data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
1456*5113495bSYour Name 					QDF_DHCP_OPTION53_LENGTH)) {
1457*5113495bSYour Name 
1458*5113495bSYour Name 		switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
1459*5113495bSYour Name 		case QDF_DHCP_DISCOVER:
1460*5113495bSYour Name 			subtype = QDF_PROTO_DHCP_DISCOVER;
1461*5113495bSYour Name 			break;
1462*5113495bSYour Name 		case QDF_DHCP_REQUEST:
1463*5113495bSYour Name 			subtype = QDF_PROTO_DHCP_REQUEST;
1464*5113495bSYour Name 			break;
1465*5113495bSYour Name 		case QDF_DHCP_OFFER:
1466*5113495bSYour Name 			subtype = QDF_PROTO_DHCP_OFFER;
1467*5113495bSYour Name 			break;
1468*5113495bSYour Name 		case QDF_DHCP_ACK:
1469*5113495bSYour Name 			subtype = QDF_PROTO_DHCP_ACK;
1470*5113495bSYour Name 			break;
1471*5113495bSYour Name 		case QDF_DHCP_NAK:
1472*5113495bSYour Name 			subtype = QDF_PROTO_DHCP_NACK;
1473*5113495bSYour Name 			break;
1474*5113495bSYour Name 		case QDF_DHCP_RELEASE:
1475*5113495bSYour Name 			subtype = QDF_PROTO_DHCP_RELEASE;
1476*5113495bSYour Name 			break;
1477*5113495bSYour Name 		case QDF_DHCP_INFORM:
1478*5113495bSYour Name 			subtype = QDF_PROTO_DHCP_INFORM;
1479*5113495bSYour Name 			break;
1480*5113495bSYour Name 		case QDF_DHCP_DECLINE:
1481*5113495bSYour Name 			subtype = QDF_PROTO_DHCP_DECLINE;
1482*5113495bSYour Name 			break;
1483*5113495bSYour Name 		default:
1484*5113495bSYour Name 			break;
1485*5113495bSYour Name 		}
1486*5113495bSYour Name 	}
1487*5113495bSYour Name 
1488*5113495bSYour Name 	return subtype;
1489*5113495bSYour Name }
1490*5113495bSYour Name 
/*
 * Key Information field bits of the EAPOL-Key frame (IEEE 802.11
 * 4-way handshake), used to distinguish M1/M3 from M2/M4 below.
 */
#define EAPOL_WPA_KEY_INFO_ACK BIT(7)
#define EAPOL_WPA_KEY_INFO_MIC BIT(8)
#define EAPOL_WPA_KEY_INFO_ENCR_KEY_DATA BIT(12) /* IEEE 802.11i/RSN only */
1494*5113495bSYour Name 
1495*5113495bSYour Name /**
1496*5113495bSYour Name  * __qdf_nbuf_data_get_eapol_key() - Get EAPOL key
1497*5113495bSYour Name  * @data: Pointer to EAPOL packet data buffer
1498*5113495bSYour Name  *
1499*5113495bSYour Name  * We can distinguish M1/M3 from M2/M4 by the ack bit in the keyinfo field
1500*5113495bSYour Name  * The ralationship between the ack bit and EAPOL type is as follows:
1501*5113495bSYour Name  *
1502*5113495bSYour Name  *  EAPOL type  |   M1    M2   M3  M4
1503*5113495bSYour Name  * --------------------------------------
1504*5113495bSYour Name  *     Ack      |   1     0    1   0
1505*5113495bSYour Name  * --------------------------------------
1506*5113495bSYour Name  *
1507*5113495bSYour Name  * Then, we can differentiate M1 from M3, M2 from M4 by below methods:
1508*5113495bSYour Name  * M2/M4: by keyDataLength or Nonce value being 0 for M4.
1509*5113495bSYour Name  * M1/M3: by the mic/encrKeyData bit in the keyinfo field.
1510*5113495bSYour Name  *
1511*5113495bSYour Name  * Return: subtype of the EAPOL packet.
1512*5113495bSYour Name  */
1513*5113495bSYour Name static inline enum qdf_proto_subtype
__qdf_nbuf_data_get_eapol_key(uint8_t * data)1514*5113495bSYour Name __qdf_nbuf_data_get_eapol_key(uint8_t *data)
1515*5113495bSYour Name {
1516*5113495bSYour Name 	uint16_t key_info, key_data_length;
1517*5113495bSYour Name 	enum qdf_proto_subtype subtype;
1518*5113495bSYour Name 	uint64_t *key_nonce;
1519*5113495bSYour Name 
1520*5113495bSYour Name 	key_info = qdf_ntohs((uint16_t)(*(uint16_t *)
1521*5113495bSYour Name 			(data + EAPOL_KEY_INFO_OFFSET)));
1522*5113495bSYour Name 
1523*5113495bSYour Name 	key_data_length = qdf_ntohs((uint16_t)(*(uint16_t *)
1524*5113495bSYour Name 				(data + EAPOL_KEY_DATA_LENGTH_OFFSET)));
1525*5113495bSYour Name 	key_nonce = (uint64_t *)(data + EAPOL_WPA_KEY_NONCE_OFFSET);
1526*5113495bSYour Name 
1527*5113495bSYour Name 	if (key_info & EAPOL_WPA_KEY_INFO_ACK)
1528*5113495bSYour Name 		if (key_info &
1529*5113495bSYour Name 		    (EAPOL_WPA_KEY_INFO_MIC | EAPOL_WPA_KEY_INFO_ENCR_KEY_DATA))
1530*5113495bSYour Name 			subtype = QDF_PROTO_EAPOL_M3;
1531*5113495bSYour Name 		else
1532*5113495bSYour Name 			subtype = QDF_PROTO_EAPOL_M1;
1533*5113495bSYour Name 	else
1534*5113495bSYour Name 		if (key_data_length == 0 ||
1535*5113495bSYour Name 		    !((*key_nonce) || (*(key_nonce + 1)) ||
1536*5113495bSYour Name 		      (*(key_nonce + 2)) || (*(key_nonce + 3))))
1537*5113495bSYour Name 			subtype = QDF_PROTO_EAPOL_M4;
1538*5113495bSYour Name 		else
1539*5113495bSYour Name 			subtype = QDF_PROTO_EAPOL_M2;
1540*5113495bSYour Name 
1541*5113495bSYour Name 	return subtype;
1542*5113495bSYour Name }
1543*5113495bSYour Name 
1544*5113495bSYour Name /**
1545*5113495bSYour Name  * __qdf_nbuf_data_get_exp_msg_type() - Get EAP expanded msg type
1546*5113495bSYour Name  * @data: Pointer to EAPOL packet data buffer
1547*5113495bSYour Name  * @code: EAP code
1548*5113495bSYour Name  *
1549*5113495bSYour Name  * Return: subtype of the EAPOL packet.
1550*5113495bSYour Name  */
1551*5113495bSYour Name static inline enum qdf_proto_subtype
__qdf_nbuf_data_get_exp_msg_type(uint8_t * data,uint8_t code)1552*5113495bSYour Name __qdf_nbuf_data_get_exp_msg_type(uint8_t *data, uint8_t code)
1553*5113495bSYour Name {
1554*5113495bSYour Name 	uint8_t msg_type;
1555*5113495bSYour Name 	uint8_t opcode = *(data + EAP_EXP_MSG_OPCODE_OFFSET);
1556*5113495bSYour Name 
1557*5113495bSYour Name 	switch (opcode) {
1558*5113495bSYour Name 	case WSC_START:
1559*5113495bSYour Name 		return QDF_PROTO_EAP_WSC_START;
1560*5113495bSYour Name 	case WSC_ACK:
1561*5113495bSYour Name 		return QDF_PROTO_EAP_WSC_ACK;
1562*5113495bSYour Name 	case WSC_NACK:
1563*5113495bSYour Name 		return QDF_PROTO_EAP_WSC_NACK;
1564*5113495bSYour Name 	case WSC_MSG:
1565*5113495bSYour Name 		msg_type = *(data + EAP_EXP_MSG_TYPE_OFFSET);
1566*5113495bSYour Name 		switch (msg_type) {
1567*5113495bSYour Name 		case EAP_EXP_TYPE_M1:
1568*5113495bSYour Name 			return QDF_PROTO_EAP_M1;
1569*5113495bSYour Name 		case EAP_EXP_TYPE_M2:
1570*5113495bSYour Name 			return QDF_PROTO_EAP_M2;
1571*5113495bSYour Name 		case EAP_EXP_TYPE_M3:
1572*5113495bSYour Name 			return QDF_PROTO_EAP_M3;
1573*5113495bSYour Name 		case EAP_EXP_TYPE_M4:
1574*5113495bSYour Name 			return QDF_PROTO_EAP_M4;
1575*5113495bSYour Name 		case EAP_EXP_TYPE_M5:
1576*5113495bSYour Name 			return QDF_PROTO_EAP_M5;
1577*5113495bSYour Name 		case EAP_EXP_TYPE_M6:
1578*5113495bSYour Name 			return QDF_PROTO_EAP_M6;
1579*5113495bSYour Name 		case EAP_EXP_TYPE_M7:
1580*5113495bSYour Name 			return QDF_PROTO_EAP_M7;
1581*5113495bSYour Name 		case EAP_EXP_TYPE_M8:
1582*5113495bSYour Name 			return QDF_PROTO_EAP_M8;
1583*5113495bSYour Name 		default:
1584*5113495bSYour Name 			break;
1585*5113495bSYour Name 		}
1586*5113495bSYour Name 		break;
1587*5113495bSYour Name 	case WSC_DONE:
1588*5113495bSYour Name 		return QDF_PROTO_EAP_WSC_DONE;
1589*5113495bSYour Name 	case WSC_FRAG_ACK:
1590*5113495bSYour Name 		return QDF_PROTO_EAP_WSC_FRAG_ACK;
1591*5113495bSYour Name 	default:
1592*5113495bSYour Name 		break;
1593*5113495bSYour Name 	}
1594*5113495bSYour Name 	switch (code) {
1595*5113495bSYour Name 	case QDF_EAP_REQUEST:
1596*5113495bSYour Name 		return QDF_PROTO_EAP_REQUEST;
1597*5113495bSYour Name 	case QDF_EAP_RESPONSE:
1598*5113495bSYour Name 		return QDF_PROTO_EAP_RESPONSE;
1599*5113495bSYour Name 	default:
1600*5113495bSYour Name 		return QDF_PROTO_INVALID;
1601*5113495bSYour Name 	}
1602*5113495bSYour Name }
1603*5113495bSYour Name 
1604*5113495bSYour Name /**
1605*5113495bSYour Name  * __qdf_nbuf_data_get_eap_type() - Get EAP type
1606*5113495bSYour Name  * @data: Pointer to EAPOL packet data buffer
1607*5113495bSYour Name  * @code: EAP code
1608*5113495bSYour Name  *
1609*5113495bSYour Name  * Return: subtype of the EAPOL packet.
1610*5113495bSYour Name  */
1611*5113495bSYour Name static inline enum qdf_proto_subtype
__qdf_nbuf_data_get_eap_type(uint8_t * data,uint8_t code)1612*5113495bSYour Name __qdf_nbuf_data_get_eap_type(uint8_t *data, uint8_t code)
1613*5113495bSYour Name {
1614*5113495bSYour Name 	uint8_t type = *(data + EAP_TYPE_OFFSET);
1615*5113495bSYour Name 
1616*5113495bSYour Name 	switch (type) {
1617*5113495bSYour Name 	case EAP_PACKET_TYPE_EXP:
1618*5113495bSYour Name 		return __qdf_nbuf_data_get_exp_msg_type(data, code);
1619*5113495bSYour Name 	case EAP_PACKET_TYPE_ID:
1620*5113495bSYour Name 		switch (code) {
1621*5113495bSYour Name 		case QDF_EAP_REQUEST:
1622*5113495bSYour Name 			return QDF_PROTO_EAP_REQ_ID;
1623*5113495bSYour Name 		case QDF_EAP_RESPONSE:
1624*5113495bSYour Name 			return QDF_PROTO_EAP_RSP_ID;
1625*5113495bSYour Name 		default:
1626*5113495bSYour Name 			return QDF_PROTO_INVALID;
1627*5113495bSYour Name 		}
1628*5113495bSYour Name 	default:
1629*5113495bSYour Name 		switch (code) {
1630*5113495bSYour Name 		case QDF_EAP_REQUEST:
1631*5113495bSYour Name 			return QDF_PROTO_EAP_REQUEST;
1632*5113495bSYour Name 		case QDF_EAP_RESPONSE:
1633*5113495bSYour Name 			return QDF_PROTO_EAP_RESPONSE;
1634*5113495bSYour Name 		default:
1635*5113495bSYour Name 			return QDF_PROTO_INVALID;
1636*5113495bSYour Name 		}
1637*5113495bSYour Name 	}
1638*5113495bSYour Name }
1639*5113495bSYour Name 
1640*5113495bSYour Name /**
1641*5113495bSYour Name  * __qdf_nbuf_data_get_eap_code() - Get EAPOL code
1642*5113495bSYour Name  * @data: Pointer to EAPOL packet data buffer
1643*5113495bSYour Name  *
1644*5113495bSYour Name  * Return: subtype of the EAPOL packet.
1645*5113495bSYour Name  */
1646*5113495bSYour Name static inline enum qdf_proto_subtype
__qdf_nbuf_data_get_eap_code(uint8_t * data)1647*5113495bSYour Name __qdf_nbuf_data_get_eap_code(uint8_t *data)
1648*5113495bSYour Name {
1649*5113495bSYour Name 	uint8_t code = *(data + EAP_CODE_OFFSET);
1650*5113495bSYour Name 
1651*5113495bSYour Name 	switch (code) {
1652*5113495bSYour Name 	case QDF_EAP_REQUEST:
1653*5113495bSYour Name 	case QDF_EAP_RESPONSE:
1654*5113495bSYour Name 		return __qdf_nbuf_data_get_eap_type(data, code);
1655*5113495bSYour Name 	case QDF_EAP_SUCCESS:
1656*5113495bSYour Name 		return QDF_PROTO_EAP_SUCCESS;
1657*5113495bSYour Name 	case QDF_EAP_FAILURE:
1658*5113495bSYour Name 		return QDF_PROTO_EAP_FAILURE;
1659*5113495bSYour Name 	case QDF_EAP_INITIATE:
1660*5113495bSYour Name 		return QDF_PROTO_EAP_INITIATE;
1661*5113495bSYour Name 	case QDF_EAP_FINISH:
1662*5113495bSYour Name 		return QDF_PROTO_EAP_FINISH;
1663*5113495bSYour Name 	default:
1664*5113495bSYour Name 		return QDF_PROTO_INVALID;
1665*5113495bSYour Name 	}
1666*5113495bSYour Name }
1667*5113495bSYour Name 
1668*5113495bSYour Name enum qdf_proto_subtype
__qdf_nbuf_data_get_eapol_subtype(uint8_t * data)1669*5113495bSYour Name __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
1670*5113495bSYour Name {
1671*5113495bSYour Name 	uint8_t pkt_type = *(data + EAPOL_PACKET_TYPE_OFFSET);
1672*5113495bSYour Name 
1673*5113495bSYour Name 	switch (pkt_type) {
1674*5113495bSYour Name 	case EAPOL_PACKET_TYPE_EAP:
1675*5113495bSYour Name 		return __qdf_nbuf_data_get_eap_code(data);
1676*5113495bSYour Name 	case EAPOL_PACKET_TYPE_START:
1677*5113495bSYour Name 		return QDF_PROTO_EAPOL_START;
1678*5113495bSYour Name 	case EAPOL_PACKET_TYPE_LOGOFF:
1679*5113495bSYour Name 		return QDF_PROTO_EAPOL_LOGOFF;
1680*5113495bSYour Name 	case EAPOL_PACKET_TYPE_KEY:
1681*5113495bSYour Name 		return __qdf_nbuf_data_get_eapol_key(data);
1682*5113495bSYour Name 	case EAPOL_PACKET_TYPE_ASF:
1683*5113495bSYour Name 		return QDF_PROTO_EAPOL_ASF;
1684*5113495bSYour Name 	default:
1685*5113495bSYour Name 		return QDF_PROTO_INVALID;
1686*5113495bSYour Name 	}
1687*5113495bSYour Name }
1688*5113495bSYour Name 
1689*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_data_get_eapol_subtype);
1690*5113495bSYour Name 
1691*5113495bSYour Name enum qdf_proto_subtype
__qdf_nbuf_data_get_arp_subtype(uint8_t * data)1692*5113495bSYour Name __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
1693*5113495bSYour Name {
1694*5113495bSYour Name 	uint16_t subtype;
1695*5113495bSYour Name 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1696*5113495bSYour Name 
1697*5113495bSYour Name 	subtype = (uint16_t)(*(uint16_t *)
1698*5113495bSYour Name 			(data + ARP_SUB_TYPE_OFFSET));
1699*5113495bSYour Name 
1700*5113495bSYour Name 	switch (QDF_SWAP_U16(subtype)) {
1701*5113495bSYour Name 	case ARP_REQUEST:
1702*5113495bSYour Name 		proto_subtype = QDF_PROTO_ARP_REQ;
1703*5113495bSYour Name 		break;
1704*5113495bSYour Name 	case ARP_RESPONSE:
1705*5113495bSYour Name 		proto_subtype = QDF_PROTO_ARP_RES;
1706*5113495bSYour Name 		break;
1707*5113495bSYour Name 	default:
1708*5113495bSYour Name 		break;
1709*5113495bSYour Name 	}
1710*5113495bSYour Name 
1711*5113495bSYour Name 	return proto_subtype;
1712*5113495bSYour Name }
1713*5113495bSYour Name 
1714*5113495bSYour Name enum qdf_proto_subtype
__qdf_nbuf_data_get_icmp_subtype(uint8_t * data)1715*5113495bSYour Name __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
1716*5113495bSYour Name {
1717*5113495bSYour Name 	uint8_t subtype;
1718*5113495bSYour Name 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1719*5113495bSYour Name 
1720*5113495bSYour Name 	subtype = (uint8_t)(*(uint8_t *)
1721*5113495bSYour Name 			(data + ICMP_SUBTYPE_OFFSET));
1722*5113495bSYour Name 
1723*5113495bSYour Name 	switch (subtype) {
1724*5113495bSYour Name 	case ICMP_REQUEST:
1725*5113495bSYour Name 		proto_subtype = QDF_PROTO_ICMP_REQ;
1726*5113495bSYour Name 		break;
1727*5113495bSYour Name 	case ICMP_RESPONSE:
1728*5113495bSYour Name 		proto_subtype = QDF_PROTO_ICMP_RES;
1729*5113495bSYour Name 		break;
1730*5113495bSYour Name 	default:
1731*5113495bSYour Name 		break;
1732*5113495bSYour Name 	}
1733*5113495bSYour Name 
1734*5113495bSYour Name 	return proto_subtype;
1735*5113495bSYour Name }
1736*5113495bSYour Name 
1737*5113495bSYour Name enum qdf_proto_subtype
__qdf_nbuf_data_get_icmpv6_subtype(uint8_t * data)1738*5113495bSYour Name __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
1739*5113495bSYour Name {
1740*5113495bSYour Name 	uint8_t subtype;
1741*5113495bSYour Name 	enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
1742*5113495bSYour Name 
1743*5113495bSYour Name 	subtype = (uint8_t)(*(uint8_t *)
1744*5113495bSYour Name 			(data + ICMPV6_SUBTYPE_OFFSET));
1745*5113495bSYour Name 
1746*5113495bSYour Name 	switch (subtype) {
1747*5113495bSYour Name 	case ICMPV6_REQUEST:
1748*5113495bSYour Name 		proto_subtype = QDF_PROTO_ICMPV6_REQ;
1749*5113495bSYour Name 		break;
1750*5113495bSYour Name 	case ICMPV6_RESPONSE:
1751*5113495bSYour Name 		proto_subtype = QDF_PROTO_ICMPV6_RES;
1752*5113495bSYour Name 		break;
1753*5113495bSYour Name 	case ICMPV6_RS:
1754*5113495bSYour Name 		proto_subtype = QDF_PROTO_ICMPV6_RS;
1755*5113495bSYour Name 		break;
1756*5113495bSYour Name 	case ICMPV6_RA:
1757*5113495bSYour Name 		proto_subtype = QDF_PROTO_ICMPV6_RA;
1758*5113495bSYour Name 		break;
1759*5113495bSYour Name 	case ICMPV6_NS:
1760*5113495bSYour Name 		proto_subtype = QDF_PROTO_ICMPV6_NS;
1761*5113495bSYour Name 		break;
1762*5113495bSYour Name 	case ICMPV6_NA:
1763*5113495bSYour Name 		proto_subtype = QDF_PROTO_ICMPV6_NA;
1764*5113495bSYour Name 		break;
1765*5113495bSYour Name 	default:
1766*5113495bSYour Name 		break;
1767*5113495bSYour Name 	}
1768*5113495bSYour Name 
1769*5113495bSYour Name 	return proto_subtype;
1770*5113495bSYour Name }
1771*5113495bSYour Name 
1772*5113495bSYour Name bool
__qdf_nbuf_is_ipv4_last_fragment(struct sk_buff * skb)1773*5113495bSYour Name __qdf_nbuf_is_ipv4_last_fragment(struct sk_buff *skb)
1774*5113495bSYour Name {
1775*5113495bSYour Name 	if (((ntohs(ip_hdr(skb)->frag_off) & ~IP_OFFSET) & IP_MF) == 0)
1776*5113495bSYour Name 		return true;
1777*5113495bSYour Name 
1778*5113495bSYour Name 	return false;
1779*5113495bSYour Name }
1780*5113495bSYour Name 
1781*5113495bSYour Name bool
__qdf_nbuf_is_ipv4_fragment(struct sk_buff * skb)1782*5113495bSYour Name __qdf_nbuf_is_ipv4_fragment(struct sk_buff *skb)
1783*5113495bSYour Name {
1784*5113495bSYour Name 	if (ntohs(ip_hdr(skb)->frag_off) & IP_MF)
1785*5113495bSYour Name 		return true;
1786*5113495bSYour Name 
1787*5113495bSYour Name 	return false;
1788*5113495bSYour Name }
1789*5113495bSYour Name 
/* Overwrite the IPv4 ToS/DSCP byte in place; the IP header checksum is
 * NOT recomputed here — the caller must handle that if required.
 */
void
__qdf_nbuf_data_set_ipv4_tos(uint8_t *data, uint8_t tos)
{
	*(uint8_t *)(data + QDF_NBUF_TRAC_IPV4_TOS_OFFSET) = tos;
}
1795*5113495bSYour Name 
1796*5113495bSYour Name uint8_t
__qdf_nbuf_data_get_ipv4_tos(uint8_t * data)1797*5113495bSYour Name __qdf_nbuf_data_get_ipv4_tos(uint8_t *data)
1798*5113495bSYour Name {
1799*5113495bSYour Name 	uint8_t tos;
1800*5113495bSYour Name 
1801*5113495bSYour Name 	tos = (uint8_t)(*(uint8_t *)(data +
1802*5113495bSYour Name 			QDF_NBUF_TRAC_IPV4_TOS_OFFSET));
1803*5113495bSYour Name 	return tos;
1804*5113495bSYour Name }
1805*5113495bSYour Name 
1806*5113495bSYour Name uint8_t
__qdf_nbuf_data_get_ipv4_proto(uint8_t * data)1807*5113495bSYour Name __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
1808*5113495bSYour Name {
1809*5113495bSYour Name 	uint8_t proto_type;
1810*5113495bSYour Name 
1811*5113495bSYour Name 	proto_type = (uint8_t)(*(uint8_t *)(data +
1812*5113495bSYour Name 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1813*5113495bSYour Name 	return proto_type;
1814*5113495bSYour Name }
1815*5113495bSYour Name 
/* Extract the IPv6 traffic-class bits via the kernel flow-info helpers */
uint8_t
__qdf_nbuf_data_get_ipv6_tc(uint8_t *data)
{
	struct ipv6hdr *hdr;

	hdr =  (struct ipv6hdr *)(data + QDF_NBUF_TRAC_IPV6_OFFSET);
	return ip6_tclass(ip6_flowinfo(hdr));
}
1824*5113495bSYour Name 
/* Rewrite the IPv6 traffic class while preserving the existing flow label */
void
__qdf_nbuf_data_set_ipv6_tc(uint8_t *data, uint8_t tc)
{
	struct ipv6hdr *hdr;

	hdr =  (struct ipv6hdr *)(data + QDF_NBUF_TRAC_IPV6_OFFSET);
	ip6_flow_hdr(hdr, tc, ip6_flowlabel(hdr));
}
1833*5113495bSYour Name 
1834*5113495bSYour Name uint8_t
__qdf_nbuf_data_get_ipv6_proto(uint8_t * data)1835*5113495bSYour Name __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
1836*5113495bSYour Name {
1837*5113495bSYour Name 	uint8_t proto_type;
1838*5113495bSYour Name 
1839*5113495bSYour Name 	proto_type = (uint8_t)(*(uint8_t *)(data +
1840*5113495bSYour Name 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
1841*5113495bSYour Name 	return proto_type;
1842*5113495bSYour Name }
1843*5113495bSYour Name 
__qdf_nbuf_data_is_ipv4_pkt(uint8_t * data)1844*5113495bSYour Name bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
1845*5113495bSYour Name {
1846*5113495bSYour Name 	uint16_t ether_type;
1847*5113495bSYour Name 
1848*5113495bSYour Name 	ether_type = (uint16_t)(*(uint16_t *)(data +
1849*5113495bSYour Name 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1850*5113495bSYour Name 
1851*5113495bSYour Name 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1852*5113495bSYour Name 		return true;
1853*5113495bSYour Name 	else
1854*5113495bSYour Name 		return false;
1855*5113495bSYour Name }
1856*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
1857*5113495bSYour Name 
__qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t * data)1858*5113495bSYour Name bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
1859*5113495bSYour Name {
1860*5113495bSYour Name 	uint16_t sport;
1861*5113495bSYour Name 	uint16_t dport;
1862*5113495bSYour Name 	uint8_t ipv4_offset;
1863*5113495bSYour Name 	uint8_t ipv4_hdr_len;
1864*5113495bSYour Name 	struct iphdr *iphdr;
1865*5113495bSYour Name 
1866*5113495bSYour Name 	if (__qdf_nbuf_get_ether_type(data) !=
1867*5113495bSYour Name 	    QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1868*5113495bSYour Name 		return false;
1869*5113495bSYour Name 
1870*5113495bSYour Name 	ipv4_offset = __qdf_nbuf_get_ip_offset(data);
1871*5113495bSYour Name 	iphdr = (struct iphdr *)(data + ipv4_offset);
1872*5113495bSYour Name 	ipv4_hdr_len = iphdr->ihl * QDF_NBUF_IPV4_HDR_SIZE_UNIT;
1873*5113495bSYour Name 
1874*5113495bSYour Name 	sport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len);
1875*5113495bSYour Name 	dport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len +
1876*5113495bSYour Name 			      sizeof(uint16_t));
1877*5113495bSYour Name 
1878*5113495bSYour Name 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
1879*5113495bSYour Name 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
1880*5113495bSYour Name 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
1881*5113495bSYour Name 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
1882*5113495bSYour Name 		return true;
1883*5113495bSYour Name 	else
1884*5113495bSYour Name 		return false;
1885*5113495bSYour Name }
1886*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
1887*5113495bSYour Name 
1888*5113495bSYour Name /**
1889*5113495bSYour Name  * qdf_is_eapol_type() - check if packet is EAPOL
1890*5113495bSYour Name  * @type: Packet type
1891*5113495bSYour Name  *
1892*5113495bSYour Name  * This api is to check if frame is EAPOL packet type.
1893*5113495bSYour Name  *
1894*5113495bSYour Name  * Return: true if it is EAPOL frame
1895*5113495bSYour Name  *         false otherwise.
1896*5113495bSYour Name  */
1897*5113495bSYour Name #ifdef BIG_ENDIAN_HOST
qdf_is_eapol_type(uint16_t type)1898*5113495bSYour Name static inline bool qdf_is_eapol_type(uint16_t type)
1899*5113495bSYour Name {
1900*5113495bSYour Name 	return (type == QDF_NBUF_TRAC_EAPOL_ETH_TYPE);
1901*5113495bSYour Name }
1902*5113495bSYour Name #else
qdf_is_eapol_type(uint16_t type)1903*5113495bSYour Name static inline bool qdf_is_eapol_type(uint16_t type)
1904*5113495bSYour Name {
1905*5113495bSYour Name 	return (type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE));
1906*5113495bSYour Name }
1907*5113495bSYour Name #endif
1908*5113495bSYour Name 
bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
{
	/* EAPOL is identified purely by its ethertype */
	return qdf_is_eapol_type(__qdf_nbuf_get_ether_type(data));
}
qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
1918*5113495bSYour Name 
__qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff * skb)1919*5113495bSYour Name bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
1920*5113495bSYour Name {
1921*5113495bSYour Name 	uint16_t ether_type;
1922*5113495bSYour Name 
1923*5113495bSYour Name 	ether_type = (uint16_t)(*(uint16_t *)(skb->data +
1924*5113495bSYour Name 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
1925*5113495bSYour Name 
1926*5113495bSYour Name 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
1927*5113495bSYour Name 		return true;
1928*5113495bSYour Name 	else
1929*5113495bSYour Name 		return false;
1930*5113495bSYour Name }
1931*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
1932*5113495bSYour Name 
1933*5113495bSYour Name /**
1934*5113495bSYour Name  * qdf_nbuf_is_ipv6_vlan_pkt() - check whether packet is vlan IPV6
1935*5113495bSYour Name  * @data: Pointer to network data buffer
1936*5113495bSYour Name  *
1937*5113495bSYour Name  * This api is for vlan header included ipv6 packet.
1938*5113495bSYour Name  *
1939*5113495bSYour Name  * Return: true if packet is vlan header included IPV6
1940*5113495bSYour Name  *	   false otherwise.
1941*5113495bSYour Name  */
qdf_nbuf_is_ipv6_vlan_pkt(uint8_t * data)1942*5113495bSYour Name static bool qdf_nbuf_is_ipv6_vlan_pkt(uint8_t *data)
1943*5113495bSYour Name {
1944*5113495bSYour Name 	uint16_t ether_type;
1945*5113495bSYour Name 
1946*5113495bSYour Name 	ether_type = *(uint16_t *)(data + QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1947*5113495bSYour Name 
1948*5113495bSYour Name 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q))) {
1949*5113495bSYour Name 		ether_type = *(uint16_t *)(data +
1950*5113495bSYour Name 					   QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
1951*5113495bSYour Name 
1952*5113495bSYour Name 		if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
1953*5113495bSYour Name 			return true;
1954*5113495bSYour Name 	}
1955*5113495bSYour Name 	return false;
1956*5113495bSYour Name }
1957*5113495bSYour Name 
1958*5113495bSYour Name /**
1959*5113495bSYour Name  * qdf_nbuf_is_ipv4_vlan_pkt() - check whether packet is vlan IPV4
1960*5113495bSYour Name  * @data: Pointer to network data buffer
1961*5113495bSYour Name  *
1962*5113495bSYour Name  * This api is for vlan header included ipv4 packet.
1963*5113495bSYour Name  *
1964*5113495bSYour Name  * Return: true if packet is vlan header included IPV4
1965*5113495bSYour Name  *	   false otherwise.
1966*5113495bSYour Name  */
qdf_nbuf_is_ipv4_vlan_pkt(uint8_t * data)1967*5113495bSYour Name static bool qdf_nbuf_is_ipv4_vlan_pkt(uint8_t *data)
1968*5113495bSYour Name {
1969*5113495bSYour Name 	uint16_t ether_type;
1970*5113495bSYour Name 
1971*5113495bSYour Name 	ether_type = *(uint16_t *)(data + QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
1972*5113495bSYour Name 
1973*5113495bSYour Name 	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q))) {
1974*5113495bSYour Name 		ether_type = *(uint16_t *)(data +
1975*5113495bSYour Name 					   QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
1976*5113495bSYour Name 
1977*5113495bSYour Name 		if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
1978*5113495bSYour Name 			return true;
1979*5113495bSYour Name 	}
1980*5113495bSYour Name 	return false;
1981*5113495bSYour Name }
1982*5113495bSYour Name 
__qdf_nbuf_data_is_ipv4_igmp_pkt(uint8_t * data)1983*5113495bSYour Name bool __qdf_nbuf_data_is_ipv4_igmp_pkt(uint8_t *data)
1984*5113495bSYour Name {
1985*5113495bSYour Name 	uint8_t pkt_type;
1986*5113495bSYour Name 
1987*5113495bSYour Name 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
1988*5113495bSYour Name 		pkt_type = (uint8_t)(*(uint8_t *)(data +
1989*5113495bSYour Name 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
1990*5113495bSYour Name 		goto is_igmp;
1991*5113495bSYour Name 	}
1992*5113495bSYour Name 
1993*5113495bSYour Name 	if (qdf_nbuf_is_ipv4_vlan_pkt(data)) {
1994*5113495bSYour Name 		pkt_type = (uint8_t)(*(uint8_t *)(
1995*5113495bSYour Name 				data +
1996*5113495bSYour Name 				QDF_NBUF_TRAC_VLAN_IPV4_PROTO_TYPE_OFFSET));
1997*5113495bSYour Name 		goto is_igmp;
1998*5113495bSYour Name 	}
1999*5113495bSYour Name 
2000*5113495bSYour Name 	return false;
2001*5113495bSYour Name is_igmp:
2002*5113495bSYour Name 	if (pkt_type == QDF_NBUF_TRAC_IGMP_TYPE)
2003*5113495bSYour Name 		return true;
2004*5113495bSYour Name 
2005*5113495bSYour Name 	return false;
2006*5113495bSYour Name }
2007*5113495bSYour Name 
2008*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_data_is_ipv4_igmp_pkt);
2009*5113495bSYour Name 
__qdf_nbuf_data_is_ipv6_igmp_pkt(uint8_t * data)2010*5113495bSYour Name bool __qdf_nbuf_data_is_ipv6_igmp_pkt(uint8_t *data)
2011*5113495bSYour Name {
2012*5113495bSYour Name 	uint8_t pkt_type;
2013*5113495bSYour Name 	uint8_t next_hdr;
2014*5113495bSYour Name 
2015*5113495bSYour Name 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2016*5113495bSYour Name 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2017*5113495bSYour Name 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2018*5113495bSYour Name 		next_hdr = (uint8_t)(*(uint8_t *)(
2019*5113495bSYour Name 				data +
2020*5113495bSYour Name 				QDF_NBUF_TRAC_IPV6_OFFSET +
2021*5113495bSYour Name 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE));
2022*5113495bSYour Name 		goto is_mld;
2023*5113495bSYour Name 	}
2024*5113495bSYour Name 
2025*5113495bSYour Name 	if (qdf_nbuf_is_ipv6_vlan_pkt(data)) {
2026*5113495bSYour Name 		pkt_type = (uint8_t)(*(uint8_t *)(
2027*5113495bSYour Name 				data +
2028*5113495bSYour Name 				QDF_NBUF_TRAC_VLAN_IPV6_PROTO_TYPE_OFFSET));
2029*5113495bSYour Name 		next_hdr = (uint8_t)(*(uint8_t *)(
2030*5113495bSYour Name 				data +
2031*5113495bSYour Name 				QDF_NBUF_TRAC_VLAN_IPV6_OFFSET +
2032*5113495bSYour Name 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE));
2033*5113495bSYour Name 		goto is_mld;
2034*5113495bSYour Name 	}
2035*5113495bSYour Name 
2036*5113495bSYour Name 	return false;
2037*5113495bSYour Name is_mld:
2038*5113495bSYour Name 	if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
2039*5113495bSYour Name 		return true;
2040*5113495bSYour Name 	if ((pkt_type == QDF_NBUF_TRAC_HOPOPTS_TYPE) &&
2041*5113495bSYour Name 	    (next_hdr == QDF_NBUF_TRAC_ICMPV6_TYPE))
2042*5113495bSYour Name 		return true;
2043*5113495bSYour Name 
2044*5113495bSYour Name 	return false;
2045*5113495bSYour Name }
2046*5113495bSYour Name 
2047*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_data_is_ipv6_igmp_pkt);
2048*5113495bSYour Name 
/**
 * __qdf_nbuf_is_ipv4_igmp_leave_pkt() - check if skb is an IGMP "leave"
 * @buf: network buffer
 *
 * Treats an IGMPv2 leave message, or an IGMPv3 membership report that
 * carries an IS_INCLUDE/TO_INCLUDE group record, as a leave.  Frames
 * with an 802.1Q tag are handled by skipping the VLAN header.
 *
 * Return: true if the frame is an IGMP leave, false otherwise
 */
bool __qdf_nbuf_is_ipv4_igmp_leave_pkt(__qdf_nbuf_t buf)
{
	qdf_ether_header_t *eh = NULL;
	uint16_t ether_type;
	uint8_t eth_hdr_size = sizeof(qdf_ether_header_t);

	eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
	ether_type = eh->ether_type;

	if (ether_type == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veth =
				(struct vlan_ethhdr *)qdf_nbuf_data(buf);
		ether_type = veth->h_vlan_encapsulated_proto;
		eth_hdr_size = sizeof(struct vlan_ethhdr);
	}

	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE)) {
		struct iphdr *iph = NULL;
		struct igmphdr *ih = NULL;

		iph = (struct iphdr *)(qdf_nbuf_data(buf) + eth_hdr_size);
		ih = (struct igmphdr *)((uint8_t *)iph + iph->ihl * 4);
		switch (ih->type) {
		case IGMP_HOST_LEAVE_MESSAGE:
			return true;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
		{
			struct igmpv3_report *ihv3 = (struct igmpv3_report *)ih;
			struct igmpv3_grec *grec = NULL;
			int num = 0;
			int i = 0;
			int len = 0;
			int type = 0;

			num = ntohs(ihv3->ngrec);
			for (i = 0; i < num; i++) {
				grec = (void *)((uint8_t *)(ihv3->grec) + len);
				type = grec->grec_type;
				if ((type == IGMPV3_MODE_IS_INCLUDE) ||
				    (type == IGMPV3_CHANGE_TO_INCLUDE))
					return true;

				/*
				 * Per RFC 3376, a group record is the fixed
				 * header plus source list plus auxiliary
				 * data (grec_auxwords 32-bit words).  The
				 * aux data length was previously ignored,
				 * which mis-parses reports whose records
				 * carry non-zero Aux Data Len.
				 */
				len += sizeof(struct igmpv3_grec);
				len += ntohs(grec->grec_nsrcs) * 4;
				len += grec->grec_auxwords * 4;
			}
			break;
		}
		default:
			break;
		}
	}

	return false;
}

qdf_export_symbol(__qdf_nbuf_is_ipv4_igmp_leave_pkt);
2105*5113495bSYour Name 
/**
 * __qdf_nbuf_is_ipv6_igmp_leave_pkt() - check if skb is an MLD "leave"
 * @buf: network buffer
 *
 * A frame counts as a leave when it is an MLDv1 done (MGM_REDUCTION) or
 * an MLDv2 report carrying an IS_INCLUDE/TO_INCLUDE/BLOCK group record.
 * VLAN tagged frames are supported; the hop-by-hop extension header is
 * skipped via ipv6_skip_exthdr() on a throw-away copy of the buffer.
 *
 * Return: true for an MLD leave, false otherwise
 */
bool __qdf_nbuf_is_ipv6_igmp_leave_pkt(__qdf_nbuf_t buf)
{
	qdf_ether_header_t *eh = NULL;
	uint16_t ether_type;
	uint8_t eth_hdr_size = sizeof(qdf_ether_header_t);

	eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
	ether_type = eh->ether_type;

	if (ether_type == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veth =
				(struct vlan_ethhdr *)qdf_nbuf_data(buf);
		ether_type = veth->h_vlan_encapsulated_proto;
		eth_hdr_size = sizeof(struct vlan_ethhdr);
	}

	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE)) {
		struct ipv6hdr *ip6h = NULL;
		struct icmp6hdr *icmp6h = NULL;
		uint8_t nexthdr;
		uint16_t frag_off = 0;
		int offset;
		qdf_nbuf_t buf_copy = NULL;

		ip6h = (struct ipv6hdr *)(qdf_nbuf_data(buf) + eth_hdr_size);
		/* MLD is always carried behind a hop-by-hop options header */
		if (ip6h->nexthdr != IPPROTO_HOPOPTS ||
		    ip6h->payload_len == 0)
			return false;

		buf_copy = qdf_nbuf_copy(buf);
		/*
		 * Fix: copy failure is the rare path; the original
		 * qdf_likely() hint was inverted (behavior unchanged).
		 */
		if (qdf_unlikely(!buf_copy))
			return false;

		nexthdr = ip6h->nexthdr;
		offset = ipv6_skip_exthdr(buf_copy,
					  eth_hdr_size + sizeof(*ip6h),
					  &nexthdr,
					  &frag_off);
		qdf_nbuf_free(buf_copy);
		if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
			return false;

		icmp6h = (struct icmp6hdr *)(qdf_nbuf_data(buf) + offset);

		switch (icmp6h->icmp6_type) {
		case ICMPV6_MGM_REDUCTION:
			return true;
		case ICMPV6_MLD2_REPORT:
		{
			struct mld2_report *mh = NULL;
			struct mld2_grec *grec = NULL;
			int num = 0;
			int i = 0;
			int len = 0;
			int type = -1;

			mh = (struct mld2_report *)icmp6h;
			num = ntohs(mh->mld2r_ngrec);
			for (i = 0; i < num; i++) {
				grec = (void *)(((uint8_t *)mh->mld2r_grec) +
						len);
				type = grec->grec_type;
				if ((type == MLD2_MODE_IS_INCLUDE) ||
				    (type == MLD2_CHANGE_TO_INCLUDE) ||
				    (type == MLD2_BLOCK_OLD_SOURCES))
					return true;

				/*
				 * Fix: per RFC 3810 the record length also
				 * includes auxiliary data (grec_auxwords
				 * 32-bit words); ignoring it mis-parses
				 * multi-record reports with aux data.
				 */
				len += sizeof(struct mld2_grec);
				len += ntohs(grec->grec_nsrcs) *
						sizeof(struct in6_addr);
				len += grec->grec_auxwords * 4;
			}
			break;
		}
		default:
			break;
		}
	}

	return false;
}

qdf_export_symbol(__qdf_nbuf_is_ipv6_igmp_leave_pkt);
2189*5113495bSYour Name 
__qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff * skb)2190*5113495bSYour Name bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
2191*5113495bSYour Name {
2192*5113495bSYour Name 	uint16_t ether_type;
2193*5113495bSYour Name 
2194*5113495bSYour Name 	ether_type = *(uint16_t *)(skb->data +
2195*5113495bSYour Name 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
2196*5113495bSYour Name 
2197*5113495bSYour Name 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
2198*5113495bSYour Name 		return true;
2199*5113495bSYour Name 	else
2200*5113495bSYour Name 		return false;
2201*5113495bSYour Name }
2202*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
2203*5113495bSYour Name 
__qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t * data)2204*5113495bSYour Name bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
2205*5113495bSYour Name {
2206*5113495bSYour Name 	uint16_t ether_type;
2207*5113495bSYour Name 
2208*5113495bSYour Name 	ether_type = __qdf_nbuf_get_ether_type(data);
2209*5113495bSYour Name 
2210*5113495bSYour Name 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
2211*5113495bSYour Name 		return true;
2212*5113495bSYour Name 	else
2213*5113495bSYour Name 		return false;
2214*5113495bSYour Name }
2215*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
2216*5113495bSYour Name 
__qdf_nbuf_data_is_arp_req(uint8_t * data)2217*5113495bSYour Name bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
2218*5113495bSYour Name {
2219*5113495bSYour Name 	uint16_t op_code;
2220*5113495bSYour Name 
2221*5113495bSYour Name 	op_code = (uint16_t)(*(uint16_t *)(data +
2222*5113495bSYour Name 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
2223*5113495bSYour Name 
2224*5113495bSYour Name 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
2225*5113495bSYour Name 		return true;
2226*5113495bSYour Name 	return false;
2227*5113495bSYour Name }
2228*5113495bSYour Name 
__qdf_nbuf_data_is_arp_rsp(uint8_t * data)2229*5113495bSYour Name bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
2230*5113495bSYour Name {
2231*5113495bSYour Name 	uint16_t op_code;
2232*5113495bSYour Name 
2233*5113495bSYour Name 	op_code = (uint16_t)(*(uint16_t *)(data +
2234*5113495bSYour Name 				QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
2235*5113495bSYour Name 
2236*5113495bSYour Name 	if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
2237*5113495bSYour Name 		return true;
2238*5113495bSYour Name 	return false;
2239*5113495bSYour Name }
2240*5113495bSYour Name 
__qdf_nbuf_get_arp_src_ip(uint8_t * data)2241*5113495bSYour Name uint32_t  __qdf_nbuf_get_arp_src_ip(uint8_t *data)
2242*5113495bSYour Name {
2243*5113495bSYour Name 	uint32_t src_ip;
2244*5113495bSYour Name 
2245*5113495bSYour Name 	src_ip = (uint32_t)(*(uint32_t *)(data +
2246*5113495bSYour Name 				QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
2247*5113495bSYour Name 
2248*5113495bSYour Name 	return src_ip;
2249*5113495bSYour Name }
2250*5113495bSYour Name 
__qdf_nbuf_get_arp_tgt_ip(uint8_t * data)2251*5113495bSYour Name uint32_t  __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
2252*5113495bSYour Name {
2253*5113495bSYour Name 	uint32_t tgt_ip;
2254*5113495bSYour Name 
2255*5113495bSYour Name 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
2256*5113495bSYour Name 				QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
2257*5113495bSYour Name 
2258*5113495bSYour Name 	return tgt_ip;
2259*5113495bSYour Name }
2260*5113495bSYour Name 
__qdf_nbuf_get_dns_domain_name(uint8_t * data,uint32_t len)2261*5113495bSYour Name uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
2262*5113495bSYour Name {
2263*5113495bSYour Name 	uint8_t *domain_name;
2264*5113495bSYour Name 
2265*5113495bSYour Name 	domain_name = (uint8_t *)
2266*5113495bSYour Name 			(data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
2267*5113495bSYour Name 	return domain_name;
2268*5113495bSYour Name }
2269*5113495bSYour Name 
__qdf_nbuf_data_is_dns_query(uint8_t * data)2270*5113495bSYour Name bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
2271*5113495bSYour Name {
2272*5113495bSYour Name 	uint16_t op_code;
2273*5113495bSYour Name 	uint16_t tgt_port;
2274*5113495bSYour Name 
2275*5113495bSYour Name 	tgt_port = (uint16_t)(*(uint16_t *)(data +
2276*5113495bSYour Name 				QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
2277*5113495bSYour Name 	/* Standard DNS query always happen on Dest Port 53. */
2278*5113495bSYour Name 	if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
2279*5113495bSYour Name 		op_code = (uint16_t)(*(uint16_t *)(data +
2280*5113495bSYour Name 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
2281*5113495bSYour Name 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
2282*5113495bSYour Name 				QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
2283*5113495bSYour Name 			return true;
2284*5113495bSYour Name 	}
2285*5113495bSYour Name 	return false;
2286*5113495bSYour Name }
2287*5113495bSYour Name 
__qdf_nbuf_data_is_dns_response(uint8_t * data)2288*5113495bSYour Name bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
2289*5113495bSYour Name {
2290*5113495bSYour Name 	uint16_t op_code;
2291*5113495bSYour Name 	uint16_t src_port;
2292*5113495bSYour Name 
2293*5113495bSYour Name 	src_port = (uint16_t)(*(uint16_t *)(data +
2294*5113495bSYour Name 				QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
2295*5113495bSYour Name 	/* Standard DNS response always comes on Src Port 53. */
2296*5113495bSYour Name 	if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
2297*5113495bSYour Name 		op_code = (uint16_t)(*(uint16_t *)(data +
2298*5113495bSYour Name 				QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
2299*5113495bSYour Name 
2300*5113495bSYour Name 		if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
2301*5113495bSYour Name 				QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
2302*5113495bSYour Name 			return true;
2303*5113495bSYour Name 	}
2304*5113495bSYour Name 	return false;
2305*5113495bSYour Name }
2306*5113495bSYour Name 
__qdf_nbuf_data_is_tcp_fin(uint8_t * data)2307*5113495bSYour Name bool __qdf_nbuf_data_is_tcp_fin(uint8_t *data)
2308*5113495bSYour Name {
2309*5113495bSYour Name 	uint8_t op_code;
2310*5113495bSYour Name 
2311*5113495bSYour Name 	op_code = (uint8_t)(*(uint8_t *)(data +
2312*5113495bSYour Name 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2313*5113495bSYour Name 
2314*5113495bSYour Name 	if (op_code == QDF_NBUF_PKT_TCPOP_FIN)
2315*5113495bSYour Name 		return true;
2316*5113495bSYour Name 
2317*5113495bSYour Name 	return false;
2318*5113495bSYour Name }
2319*5113495bSYour Name 
__qdf_nbuf_data_is_tcp_fin_ack(uint8_t * data)2320*5113495bSYour Name bool __qdf_nbuf_data_is_tcp_fin_ack(uint8_t *data)
2321*5113495bSYour Name {
2322*5113495bSYour Name 	uint8_t op_code;
2323*5113495bSYour Name 
2324*5113495bSYour Name 	op_code = (uint8_t)(*(uint8_t *)(data +
2325*5113495bSYour Name 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2326*5113495bSYour Name 
2327*5113495bSYour Name 	if (op_code == QDF_NBUF_PKT_TCPOP_FIN_ACK)
2328*5113495bSYour Name 		return true;
2329*5113495bSYour Name 
2330*5113495bSYour Name 	return false;
2331*5113495bSYour Name }
2332*5113495bSYour Name 
__qdf_nbuf_data_is_tcp_syn(uint8_t * data)2333*5113495bSYour Name bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
2334*5113495bSYour Name {
2335*5113495bSYour Name 	uint8_t op_code;
2336*5113495bSYour Name 
2337*5113495bSYour Name 	op_code = (uint8_t)(*(uint8_t *)(data +
2338*5113495bSYour Name 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2339*5113495bSYour Name 
2340*5113495bSYour Name 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
2341*5113495bSYour Name 		return true;
2342*5113495bSYour Name 	return false;
2343*5113495bSYour Name }
2344*5113495bSYour Name 
__qdf_nbuf_data_is_tcp_syn_ack(uint8_t * data)2345*5113495bSYour Name bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
2346*5113495bSYour Name {
2347*5113495bSYour Name 	uint8_t op_code;
2348*5113495bSYour Name 
2349*5113495bSYour Name 	op_code = (uint8_t)(*(uint8_t *)(data +
2350*5113495bSYour Name 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2351*5113495bSYour Name 
2352*5113495bSYour Name 	if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
2353*5113495bSYour Name 		return true;
2354*5113495bSYour Name 	return false;
2355*5113495bSYour Name }
2356*5113495bSYour Name 
__qdf_nbuf_data_is_tcp_rst(uint8_t * data)2357*5113495bSYour Name bool __qdf_nbuf_data_is_tcp_rst(uint8_t *data)
2358*5113495bSYour Name {
2359*5113495bSYour Name 	uint8_t op_code;
2360*5113495bSYour Name 
2361*5113495bSYour Name 	op_code = (uint8_t)(*(uint8_t *)(data +
2362*5113495bSYour Name 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2363*5113495bSYour Name 
2364*5113495bSYour Name 	if (op_code == QDF_NBUF_PKT_TCPOP_RST)
2365*5113495bSYour Name 		return true;
2366*5113495bSYour Name 
2367*5113495bSYour Name 	return false;
2368*5113495bSYour Name }
2369*5113495bSYour Name 
__qdf_nbuf_data_is_tcp_ack(uint8_t * data)2370*5113495bSYour Name bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
2371*5113495bSYour Name {
2372*5113495bSYour Name 	uint8_t op_code;
2373*5113495bSYour Name 
2374*5113495bSYour Name 	op_code = (uint8_t)(*(uint8_t *)(data +
2375*5113495bSYour Name 				QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
2376*5113495bSYour Name 
2377*5113495bSYour Name 	if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
2378*5113495bSYour Name 		return true;
2379*5113495bSYour Name 	return false;
2380*5113495bSYour Name }
2381*5113495bSYour Name 
__qdf_nbuf_data_get_tcp_src_port(uint8_t * data)2382*5113495bSYour Name uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
2383*5113495bSYour Name {
2384*5113495bSYour Name 	uint16_t src_port;
2385*5113495bSYour Name 
2386*5113495bSYour Name 	src_port = (uint16_t)(*(uint16_t *)(data +
2387*5113495bSYour Name 				QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
2388*5113495bSYour Name 
2389*5113495bSYour Name 	return src_port;
2390*5113495bSYour Name }
2391*5113495bSYour Name 
__qdf_nbuf_data_get_tcp_dst_port(uint8_t * data)2392*5113495bSYour Name uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
2393*5113495bSYour Name {
2394*5113495bSYour Name 	uint16_t tgt_port;
2395*5113495bSYour Name 
2396*5113495bSYour Name 	tgt_port = (uint16_t)(*(uint16_t *)(data +
2397*5113495bSYour Name 				QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
2398*5113495bSYour Name 
2399*5113495bSYour Name 	return tgt_port;
2400*5113495bSYour Name }
2401*5113495bSYour Name 
__qdf_nbuf_data_is_icmpv4_req(uint8_t * data)2402*5113495bSYour Name bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
2403*5113495bSYour Name {
2404*5113495bSYour Name 	uint8_t op_code;
2405*5113495bSYour Name 
2406*5113495bSYour Name 	op_code = (uint8_t)(*(uint8_t *)(data +
2407*5113495bSYour Name 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2408*5113495bSYour Name 
2409*5113495bSYour Name 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
2410*5113495bSYour Name 		return true;
2411*5113495bSYour Name 	return false;
2412*5113495bSYour Name }
2413*5113495bSYour Name 
__qdf_nbuf_data_is_icmpv4_rsp(uint8_t * data)2414*5113495bSYour Name bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
2415*5113495bSYour Name {
2416*5113495bSYour Name 	uint8_t op_code;
2417*5113495bSYour Name 
2418*5113495bSYour Name 	op_code = (uint8_t)(*(uint8_t *)(data +
2419*5113495bSYour Name 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2420*5113495bSYour Name 
2421*5113495bSYour Name 	if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
2422*5113495bSYour Name 		return true;
2423*5113495bSYour Name 	return false;
2424*5113495bSYour Name }
2425*5113495bSYour Name 
__qdf_nbuf_data_is_icmpv4_redirect(uint8_t * data)2426*5113495bSYour Name bool __qdf_nbuf_data_is_icmpv4_redirect(uint8_t *data)
2427*5113495bSYour Name {
2428*5113495bSYour Name 	uint8_t op_code;
2429*5113495bSYour Name 
2430*5113495bSYour Name 	op_code = (uint8_t)(*(uint8_t *)(data +
2431*5113495bSYour Name 				QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
2432*5113495bSYour Name 
2433*5113495bSYour Name 	if (op_code == QDF_NBUF_PKT_ICMPV4_REDIRECT)
2434*5113495bSYour Name 		return true;
2435*5113495bSYour Name 	return false;
2436*5113495bSYour Name }
2437*5113495bSYour Name 
2438*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_data_is_icmpv4_redirect);
2439*5113495bSYour Name 
__qdf_nbuf_data_is_icmpv6_redirect(uint8_t * data)2440*5113495bSYour Name bool __qdf_nbuf_data_is_icmpv6_redirect(uint8_t *data)
2441*5113495bSYour Name {
2442*5113495bSYour Name 	uint8_t subtype;
2443*5113495bSYour Name 
2444*5113495bSYour Name 	subtype = (uint8_t)(*(uint8_t *)(data + ICMPV6_SUBTYPE_OFFSET));
2445*5113495bSYour Name 
2446*5113495bSYour Name 	if (subtype == ICMPV6_REDIRECT)
2447*5113495bSYour Name 		return true;
2448*5113495bSYour Name 	return false;
2449*5113495bSYour Name }
2450*5113495bSYour Name 
2451*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_data_is_icmpv6_redirect);
2452*5113495bSYour Name 
__qdf_nbuf_get_icmpv4_src_ip(uint8_t * data)2453*5113495bSYour Name uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
2454*5113495bSYour Name {
2455*5113495bSYour Name 	uint32_t src_ip;
2456*5113495bSYour Name 
2457*5113495bSYour Name 	src_ip = (uint32_t)(*(uint32_t *)(data +
2458*5113495bSYour Name 				QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
2459*5113495bSYour Name 
2460*5113495bSYour Name 	return src_ip;
2461*5113495bSYour Name }
2462*5113495bSYour Name 
__qdf_nbuf_get_icmpv4_tgt_ip(uint8_t * data)2463*5113495bSYour Name uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
2464*5113495bSYour Name {
2465*5113495bSYour Name 	uint32_t tgt_ip;
2466*5113495bSYour Name 
2467*5113495bSYour Name 	tgt_ip = (uint32_t)(*(uint32_t *)(data +
2468*5113495bSYour Name 				QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
2469*5113495bSYour Name 
2470*5113495bSYour Name 	return tgt_ip;
2471*5113495bSYour Name }
2472*5113495bSYour Name 
__qdf_nbuf_data_is_ipv6_pkt(uint8_t * data)2473*5113495bSYour Name bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
2474*5113495bSYour Name {
2475*5113495bSYour Name 	uint16_t ether_type;
2476*5113495bSYour Name 
2477*5113495bSYour Name 	ether_type = (uint16_t)(*(uint16_t *)(data +
2478*5113495bSYour Name 				QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
2479*5113495bSYour Name 
2480*5113495bSYour Name 	if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
2481*5113495bSYour Name 		return true;
2482*5113495bSYour Name 	else
2483*5113495bSYour Name 		return false;
2484*5113495bSYour Name }
2485*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
2486*5113495bSYour Name 
__qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t * data)2487*5113495bSYour Name bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
2488*5113495bSYour Name {
2489*5113495bSYour Name 	uint16_t sport;
2490*5113495bSYour Name 	uint16_t dport;
2491*5113495bSYour Name 	uint8_t ipv6_offset;
2492*5113495bSYour Name 
2493*5113495bSYour Name 	if (!__qdf_nbuf_data_is_ipv6_pkt(data))
2494*5113495bSYour Name 		return false;
2495*5113495bSYour Name 
2496*5113495bSYour Name 	ipv6_offset = __qdf_nbuf_get_ip_offset(data);
2497*5113495bSYour Name 	sport = *(uint16_t *)(data + ipv6_offset +
2498*5113495bSYour Name 			      QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
2499*5113495bSYour Name 	dport = *(uint16_t *)(data + ipv6_offset +
2500*5113495bSYour Name 			      QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
2501*5113495bSYour Name 			      sizeof(uint16_t));
2502*5113495bSYour Name 
2503*5113495bSYour Name 	if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
2504*5113495bSYour Name 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
2505*5113495bSYour Name 	    ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
2506*5113495bSYour Name 	     (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
2507*5113495bSYour Name 		return true;
2508*5113495bSYour Name 	else
2509*5113495bSYour Name 		return false;
2510*5113495bSYour Name }
2511*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
2512*5113495bSYour Name 
__qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t * data)2513*5113495bSYour Name bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data)
2514*5113495bSYour Name {
2515*5113495bSYour Name 	uint16_t sport;
2516*5113495bSYour Name 	uint16_t dport;
2517*5113495bSYour Name 
2518*5113495bSYour Name 	sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
2519*5113495bSYour Name 				QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
2520*5113495bSYour Name 	dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
2521*5113495bSYour Name 					QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
2522*5113495bSYour Name 					sizeof(uint16_t));
2523*5113495bSYour Name 
2524*5113495bSYour Name 	if (sport == QDF_SWAP_U16(QDF_NBUF_TRAC_MDNS_SRC_N_DST_PORT) &&
2525*5113495bSYour Name 	    dport == sport)
2526*5113495bSYour Name 		return true;
2527*5113495bSYour Name 	else
2528*5113495bSYour Name 		return false;
2529*5113495bSYour Name }
2530*5113495bSYour Name 
2531*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_data_is_ipv6_mdns_pkt);
2532*5113495bSYour Name 
__qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t * data)2533*5113495bSYour Name bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
2534*5113495bSYour Name {
2535*5113495bSYour Name 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2536*5113495bSYour Name 		uint32_t *dst_addr =
2537*5113495bSYour Name 		      (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
2538*5113495bSYour Name 
2539*5113495bSYour Name 		/*
2540*5113495bSYour Name 		 * Check first word of the IPV4 address and if it is
2541*5113495bSYour Name 		 * equal to 0xE then it represents multicast IP.
2542*5113495bSYour Name 		 */
2543*5113495bSYour Name 		if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
2544*5113495bSYour Name 				QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
2545*5113495bSYour Name 			return true;
2546*5113495bSYour Name 		else
2547*5113495bSYour Name 			return false;
2548*5113495bSYour Name 	} else
2549*5113495bSYour Name 		return false;
2550*5113495bSYour Name }
2551*5113495bSYour Name 
__qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t * data)2552*5113495bSYour Name bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
2553*5113495bSYour Name {
2554*5113495bSYour Name 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2555*5113495bSYour Name 		uint16_t *dst_addr;
2556*5113495bSYour Name 
2557*5113495bSYour Name 		dst_addr = (uint16_t *)
2558*5113495bSYour Name 			(data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
2559*5113495bSYour Name 
2560*5113495bSYour Name 		/*
2561*5113495bSYour Name 		 * Check first byte of the IP address and if it
2562*5113495bSYour Name 		 * 0xFF00 then it is a IPV6 mcast packet.
2563*5113495bSYour Name 		 */
2564*5113495bSYour Name 		if (*dst_addr ==
2565*5113495bSYour Name 		     QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
2566*5113495bSYour Name 			return true;
2567*5113495bSYour Name 		else
2568*5113495bSYour Name 			return false;
2569*5113495bSYour Name 	} else
2570*5113495bSYour Name 		return false;
2571*5113495bSYour Name }
2572*5113495bSYour Name 
__qdf_nbuf_data_is_icmp_pkt(uint8_t * data)2573*5113495bSYour Name bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
2574*5113495bSYour Name {
2575*5113495bSYour Name 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2576*5113495bSYour Name 		uint8_t pkt_type;
2577*5113495bSYour Name 
2578*5113495bSYour Name 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2579*5113495bSYour Name 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2580*5113495bSYour Name 
2581*5113495bSYour Name 		if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
2582*5113495bSYour Name 			return true;
2583*5113495bSYour Name 		else
2584*5113495bSYour Name 			return false;
2585*5113495bSYour Name 	} else
2586*5113495bSYour Name 		return false;
2587*5113495bSYour Name }
2588*5113495bSYour Name 
2589*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_data_is_icmp_pkt);
2590*5113495bSYour Name 
__qdf_nbuf_data_is_icmpv6_pkt(uint8_t * data)2591*5113495bSYour Name bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
2592*5113495bSYour Name {
2593*5113495bSYour Name 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2594*5113495bSYour Name 		uint8_t pkt_type;
2595*5113495bSYour Name 
2596*5113495bSYour Name 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2597*5113495bSYour Name 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2598*5113495bSYour Name 
2599*5113495bSYour Name 		if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
2600*5113495bSYour Name 			return true;
2601*5113495bSYour Name 		else
2602*5113495bSYour Name 			return false;
2603*5113495bSYour Name 	} else
2604*5113495bSYour Name 		return false;
2605*5113495bSYour Name }
2606*5113495bSYour Name 
2607*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_data_is_icmpv6_pkt);
2608*5113495bSYour Name 
__qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t * data)2609*5113495bSYour Name bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
2610*5113495bSYour Name {
2611*5113495bSYour Name 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2612*5113495bSYour Name 		uint8_t pkt_type;
2613*5113495bSYour Name 
2614*5113495bSYour Name 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2615*5113495bSYour Name 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2616*5113495bSYour Name 
2617*5113495bSYour Name 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2618*5113495bSYour Name 			return true;
2619*5113495bSYour Name 		else
2620*5113495bSYour Name 			return false;
2621*5113495bSYour Name 	} else
2622*5113495bSYour Name 		return false;
2623*5113495bSYour Name }
2624*5113495bSYour Name 
__qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t * data)2625*5113495bSYour Name bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
2626*5113495bSYour Name {
2627*5113495bSYour Name 	if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
2628*5113495bSYour Name 		uint8_t pkt_type;
2629*5113495bSYour Name 
2630*5113495bSYour Name 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2631*5113495bSYour Name 				QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
2632*5113495bSYour Name 
2633*5113495bSYour Name 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2634*5113495bSYour Name 			return true;
2635*5113495bSYour Name 		else
2636*5113495bSYour Name 			return false;
2637*5113495bSYour Name 	} else
2638*5113495bSYour Name 		return false;
2639*5113495bSYour Name }
2640*5113495bSYour Name 
__qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t * data)2641*5113495bSYour Name bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
2642*5113495bSYour Name {
2643*5113495bSYour Name 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2644*5113495bSYour Name 		uint8_t pkt_type;
2645*5113495bSYour Name 
2646*5113495bSYour Name 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2647*5113495bSYour Name 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2648*5113495bSYour Name 
2649*5113495bSYour Name 		if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
2650*5113495bSYour Name 			return true;
2651*5113495bSYour Name 		else
2652*5113495bSYour Name 			return false;
2653*5113495bSYour Name 	} else
2654*5113495bSYour Name 		return false;
2655*5113495bSYour Name }
2656*5113495bSYour Name 
__qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t * data)2657*5113495bSYour Name bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
2658*5113495bSYour Name {
2659*5113495bSYour Name 	if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
2660*5113495bSYour Name 		uint8_t pkt_type;
2661*5113495bSYour Name 
2662*5113495bSYour Name 		pkt_type = (uint8_t)(*(uint8_t *)(data +
2663*5113495bSYour Name 				QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
2664*5113495bSYour Name 
2665*5113495bSYour Name 		if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
2666*5113495bSYour Name 			return true;
2667*5113495bSYour Name 		else
2668*5113495bSYour Name 			return false;
2669*5113495bSYour Name 	} else
2670*5113495bSYour Name 		return false;
2671*5113495bSYour Name }
2672*5113495bSYour Name 
__qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)2673*5113495bSYour Name bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
2674*5113495bSYour Name {
2675*5113495bSYour Name 	struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
2676*5113495bSYour Name 	return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
2677*5113495bSYour Name }
2678*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
2679*5113495bSYour Name 
__qdf_nbuf_is_mcast_replay(qdf_nbuf_t nbuf)2680*5113495bSYour Name bool __qdf_nbuf_is_mcast_replay(qdf_nbuf_t nbuf)
2681*5113495bSYour Name {
2682*5113495bSYour Name 	struct sk_buff *skb = (struct sk_buff *)nbuf;
2683*5113495bSYour Name 	struct ethhdr *eth = eth_hdr(skb);
2684*5113495bSYour Name 
2685*5113495bSYour Name 	if (qdf_likely(skb->pkt_type != PACKET_MULTICAST))
2686*5113495bSYour Name 		return false;
2687*5113495bSYour Name 
2688*5113495bSYour Name 	if (qdf_unlikely(ether_addr_equal(eth->h_source, skb->dev->dev_addr)))
2689*5113495bSYour Name 		return true;
2690*5113495bSYour Name 
2691*5113495bSYour Name 	return false;
2692*5113495bSYour Name }
2693*5113495bSYour Name 
/**
 * __qdf_nbuf_is_arp_local() - check if an ARP request targets a local IP
 * @skb: network buffer whose data points at the ARP header
 *
 * For ARP requests, walks the receiving device's IPv4 address list under
 * the RTNL lock and compares the target IP in the ARP payload against the
 * address configured on that interface. If the RTNL lock cannot be taken,
 * the packet is conservatively treated as local.
 *
 * Return: true if the ARP request targets this device's IP (or the RTNL
 *	   lock could not be acquired), false otherwise
 */
bool __qdf_nbuf_is_arp_local(struct sk_buff *skb)
{
	struct arphdr *arp;
	struct in_ifaddr **ifap = NULL;
	struct in_ifaddr *ifa = NULL;
	struct in_device *in_dev;
	unsigned char *arp_ptr;
	__be32 tip;

	arp = (struct arphdr *)skb->data;
	if (arp->ar_op == htons(ARPOP_REQUEST)) {
		/* if fail to acquire rtnl lock, assume it's local arp */
		if (!rtnl_trylock())
			return true;

		in_dev = __in_dev_get_rtnl(skb->dev);
		if (in_dev) {
			/* find the in_ifaddr whose label matches this netdev */
			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
				ifap = &ifa->ifa_next) {
				if (!strcmp(skb->dev->name, ifa->ifa_label))
					break;
			}
		}

		if (ifa && ifa->ifa_local) {
			/*
			 * Target IP follows sender-HW, sender-IP (4 bytes)
			 * and target-HW fields of the ARP payload.
			 */
			arp_ptr = (unsigned char *)(arp + 1);
			arp_ptr += (skb->dev->addr_len + 4 +
					skb->dev->addr_len);
			memcpy(&tip, arp_ptr, 4);
			qdf_debug("ARP packet: local IP: %x dest IP: %x",
				  ifa->ifa_local, tip);
			if (ifa->ifa_local == tip) {
				rtnl_unlock();
				return true;
			}
		}
		rtnl_unlock();
	}

	return false;
}
2735*5113495bSYour Name 
2736*5113495bSYour Name /**
2737*5113495bSYour Name  * __qdf_nbuf_data_get_tcp_hdr_len() - get TCP header length
2738*5113495bSYour Name  * @data: pointer to data of network buffer
2739*5113495bSYour Name  * @tcp_hdr_len_offset: bytes offset for tcp header length of ethernet packets
2740*5113495bSYour Name  *
2741*5113495bSYour Name  * Return: TCP header length in unit of byte
2742*5113495bSYour Name  */
2743*5113495bSYour Name static inline
__qdf_nbuf_data_get_tcp_hdr_len(uint8_t * data,uint8_t tcp_hdr_len_offset)2744*5113495bSYour Name uint8_t __qdf_nbuf_data_get_tcp_hdr_len(uint8_t *data,
2745*5113495bSYour Name 					uint8_t tcp_hdr_len_offset)
2746*5113495bSYour Name {
2747*5113495bSYour Name 	uint8_t tcp_hdr_len;
2748*5113495bSYour Name 
2749*5113495bSYour Name 	tcp_hdr_len =
2750*5113495bSYour Name 		*((uint8_t *)(data + tcp_hdr_len_offset));
2751*5113495bSYour Name 
2752*5113495bSYour Name 	tcp_hdr_len = ((tcp_hdr_len & QDF_NBUF_PKT_TCP_HDR_LEN_MASK) >>
2753*5113495bSYour Name 		       QDF_NBUF_PKT_TCP_HDR_LEN_LSB) *
2754*5113495bSYour Name 		       QDF_NBUF_PKT_TCP_HDR_LEN_UNIT;
2755*5113495bSYour Name 
2756*5113495bSYour Name 	return tcp_hdr_len;
2757*5113495bSYour Name }
2758*5113495bSYour Name 
/**
 * __qdf_nbuf_is_ipv4_v6_pure_tcp_ack() - check for a pure TCP ACK frame
 * @skb: network buffer
 *
 * A "pure" ACK carries no TCP payload: the IP payload length equals the
 * TCP header length and the TCP flags byte holds only the ACK bit.
 * Nonlinear buffers and frames longer than the maximum possible pure-ACK
 * length are rejected up front.
 *
 * Return: true if the frame is an IPv4 or IPv6 pure TCP ACK,
 *	   false otherwise
 */
bool __qdf_nbuf_is_ipv4_v6_pure_tcp_ack(struct sk_buff *skb)
{
	bool is_tcp_ack = false;
	uint8_t op_code, tcp_hdr_len;
	uint16_t ip_payload_len;
	uint8_t *data = skb->data;

	/*
	 * If packet length > TCP ACK max length or it's nonlinearized,
	 * then it must not be TCP ACK.
	 */
	if (qdf_nbuf_len(skb) > QDF_NBUF_PKT_TCP_ACK_MAX_LEN ||
	    qdf_nbuf_is_nonlinear(skb))
		return false;

	if (qdf_nbuf_is_ipv4_tcp_pkt(skb)) {
		/* IPv4 total length includes the IP header; subtract it */
		ip_payload_len =
			QDF_SWAP_U16(*((uint16_t *)(data +
				     QDF_NBUF_TRAC_IPV4_TOTAL_LEN_OFFSET)))
					- QDF_NBUF_TRAC_IPV4_HEADER_SIZE;

		tcp_hdr_len = __qdf_nbuf_data_get_tcp_hdr_len(
					data,
					QDF_NBUF_PKT_IPV4_TCP_HDR_LEN_OFFSET);

		op_code = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_PKT_IPV4_TCP_OPCODE_OFFSET));

		/* no payload beyond the TCP header and only ACK flag set */
		if (ip_payload_len == tcp_hdr_len &&
		    op_code == QDF_NBUF_PKT_TCPOP_ACK)
			is_tcp_ack = true;

	} else if (qdf_nbuf_is_ipv6_tcp_pkt(skb)) {
		/* IPv6 payload length field already excludes the header */
		ip_payload_len =
			QDF_SWAP_U16(*((uint16_t *)(data +
				QDF_NBUF_TRAC_IPV6_PAYLOAD_LEN_OFFSET)));

		tcp_hdr_len = __qdf_nbuf_data_get_tcp_hdr_len(
					data,
					QDF_NBUF_PKT_IPV6_TCP_HDR_LEN_OFFSET);
		op_code = (uint8_t)(*(uint8_t *)(data +
				QDF_NBUF_PKT_IPV6_TCP_OPCODE_OFFSET));

		if (ip_payload_len == tcp_hdr_len &&
		    op_code == QDF_NBUF_PKT_TCPOP_ACK)
			is_tcp_ack = true;
	}

	return is_tcp_ack;
}
2809*5113495bSYour Name 
#ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
/**
 * qdf_nbuf_fast_xmit() - read the nbuf's fast_xmit flag
 * @nbuf: network buffer
 *
 * Return: current value of the fast_xmit flag
 */
bool qdf_nbuf_fast_xmit(qdf_nbuf_t nbuf)
{
	return nbuf->fast_xmit;
}

qdf_export_symbol(qdf_nbuf_fast_xmit);

/**
 * qdf_nbuf_set_fast_xmit() - set the nbuf's fast_xmit flag
 * @nbuf: network buffer
 * @value: flag value to store
 *
 * Return: none
 */
void qdf_nbuf_set_fast_xmit(qdf_nbuf_t nbuf, int value)
{
	nbuf->fast_xmit = value;
}

qdf_export_symbol(qdf_nbuf_set_fast_xmit);
#else
/* Fast-recycle check disabled: getter reports false, setter is a no-op */
bool qdf_nbuf_fast_xmit(qdf_nbuf_t nbuf)
{
	return false;
}

qdf_export_symbol(qdf_nbuf_fast_xmit);

void qdf_nbuf_set_fast_xmit(qdf_nbuf_t nbuf, int value)
{
}

qdf_export_symbol(qdf_nbuf_set_fast_xmit);
#endif
2838*5113495bSYour Name 
#ifdef NBUF_MEMORY_DEBUG

/* per-bucket locks protecting the nbuf tracking hash table below */
static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];

/* hash table of tracking nodes for outstanding (allocated) nbufs */
static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
/* slab cache backing tracking-node allocations */
static struct kmem_cache *nbuf_tracking_cache;
/* singly-linked freelist of recycled tracking nodes, its lock and count */
static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
static spinlock_t qdf_net_buf_track_free_list_lock;
static uint32_t qdf_net_buf_track_free_list_count;
/* number of tracking nodes currently handed out */
static uint32_t qdf_net_buf_track_used_list_count;
/* high-water marks: used nodes, freelist size, total nodes allocated */
static uint32_t qdf_net_buf_track_max_used;
static uint32_t qdf_net_buf_track_max_free;
static uint32_t qdf_net_buf_track_max_allocated;
/* allocation-failure counter — NOTE(review): not updated in this chunk */
static uint32_t qdf_net_buf_track_fail_count;
2853*5113495bSYour Name 
2854*5113495bSYour Name /**
2855*5113495bSYour Name  * update_max_used() - update qdf_net_buf_track_max_used tracking variable
2856*5113495bSYour Name  *
2857*5113495bSYour Name  * tracks the max number of network buffers that the wlan driver was tracking
2858*5113495bSYour Name  * at any one time.
2859*5113495bSYour Name  *
2860*5113495bSYour Name  * Return: none
2861*5113495bSYour Name  */
update_max_used(void)2862*5113495bSYour Name static inline void update_max_used(void)
2863*5113495bSYour Name {
2864*5113495bSYour Name 	int sum;
2865*5113495bSYour Name 
2866*5113495bSYour Name 	if (qdf_net_buf_track_max_used <
2867*5113495bSYour Name 	    qdf_net_buf_track_used_list_count)
2868*5113495bSYour Name 		qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
2869*5113495bSYour Name 	sum = qdf_net_buf_track_free_list_count +
2870*5113495bSYour Name 		qdf_net_buf_track_used_list_count;
2871*5113495bSYour Name 	if (qdf_net_buf_track_max_allocated < sum)
2872*5113495bSYour Name 		qdf_net_buf_track_max_allocated = sum;
2873*5113495bSYour Name }
2874*5113495bSYour Name 
2875*5113495bSYour Name /**
2876*5113495bSYour Name  * update_max_free() - update qdf_net_buf_track_free_list_count
2877*5113495bSYour Name  *
2878*5113495bSYour Name  * tracks the max number tracking buffers kept in the freelist.
2879*5113495bSYour Name  *
2880*5113495bSYour Name  * Return: none
2881*5113495bSYour Name  */
update_max_free(void)2882*5113495bSYour Name static inline void update_max_free(void)
2883*5113495bSYour Name {
2884*5113495bSYour Name 	if (qdf_net_buf_track_max_free <
2885*5113495bSYour Name 	    qdf_net_buf_track_free_list_count)
2886*5113495bSYour Name 		qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
2887*5113495bSYour Name }
2888*5113495bSYour Name 
2889*5113495bSYour Name /**
2890*5113495bSYour Name  * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
2891*5113495bSYour Name  *
2892*5113495bSYour Name  * This function pulls from a freelist if possible and uses kmem_cache_alloc.
2893*5113495bSYour Name  * This function also ads fexibility to adjust the allocation and freelist
2894*5113495bSYour Name  * scheems.
2895*5113495bSYour Name  *
2896*5113495bSYour Name  * Return: a pointer to an unused QDF_NBUF_TRACK structure may not be zeroed.
2897*5113495bSYour Name  */
qdf_nbuf_track_alloc(void)2898*5113495bSYour Name static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
2899*5113495bSYour Name {
2900*5113495bSYour Name 	int flags = GFP_KERNEL;
2901*5113495bSYour Name 	unsigned long irq_flag;
2902*5113495bSYour Name 	QDF_NBUF_TRACK *new_node = NULL;
2903*5113495bSYour Name 
2904*5113495bSYour Name 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2905*5113495bSYour Name 	qdf_net_buf_track_used_list_count++;
2906*5113495bSYour Name 	if (qdf_net_buf_track_free_list) {
2907*5113495bSYour Name 		new_node = qdf_net_buf_track_free_list;
2908*5113495bSYour Name 		qdf_net_buf_track_free_list =
2909*5113495bSYour Name 			qdf_net_buf_track_free_list->p_next;
2910*5113495bSYour Name 		qdf_net_buf_track_free_list_count--;
2911*5113495bSYour Name 	}
2912*5113495bSYour Name 	update_max_used();
2913*5113495bSYour Name 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2914*5113495bSYour Name 
2915*5113495bSYour Name 	if (new_node)
2916*5113495bSYour Name 		return new_node;
2917*5113495bSYour Name 
2918*5113495bSYour Name 	if (in_interrupt() || irqs_disabled() || in_atomic())
2919*5113495bSYour Name 		flags = GFP_ATOMIC;
2920*5113495bSYour Name 
2921*5113495bSYour Name 	return kmem_cache_alloc(nbuf_tracking_cache, flags);
2922*5113495bSYour Name }
2923*5113495bSYour Name 
2924*5113495bSYour Name /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
2925*5113495bSYour Name #define FREEQ_POOLSIZE 2048
2926*5113495bSYour Name 
2927*5113495bSYour Name /**
2928*5113495bSYour Name  * qdf_nbuf_track_free() - free the nbuf tracking cookie.
2929*5113495bSYour Name  * @node: nbuf tracking node
2930*5113495bSYour Name  *
2931*5113495bSYour Name  * Matches calls to qdf_nbuf_track_alloc.
2932*5113495bSYour Name  * Either frees the tracking cookie to kernel or an internal
2933*5113495bSYour Name  * freelist based on the size of the freelist.
2934*5113495bSYour Name  *
2935*5113495bSYour Name  * Return: none
2936*5113495bSYour Name  */
qdf_nbuf_track_free(QDF_NBUF_TRACK * node)2937*5113495bSYour Name static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
2938*5113495bSYour Name {
2939*5113495bSYour Name 	unsigned long irq_flag;
2940*5113495bSYour Name 
2941*5113495bSYour Name 	if (!node)
2942*5113495bSYour Name 		return;
2943*5113495bSYour Name 
2944*5113495bSYour Name 	/* Try to shrink the freelist if free_list_count > than FREEQ_POOLSIZE
2945*5113495bSYour Name 	 * only shrink the freelist if it is bigger than twice the number of
2946*5113495bSYour Name 	 * nbufs in use. If the driver is stalling in a consistent bursty
2947*5113495bSYour Name 	 * fashion, this will keep 3/4 of thee allocations from the free list
2948*5113495bSYour Name 	 * while also allowing the system to recover memory as less frantic
2949*5113495bSYour Name 	 * traffic occurs.
2950*5113495bSYour Name 	 */
2951*5113495bSYour Name 
2952*5113495bSYour Name 	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
2953*5113495bSYour Name 
2954*5113495bSYour Name 	qdf_net_buf_track_used_list_count--;
2955*5113495bSYour Name 	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
2956*5113495bSYour Name 	   (qdf_net_buf_track_free_list_count >
2957*5113495bSYour Name 	    qdf_net_buf_track_used_list_count << 1)) {
2958*5113495bSYour Name 		kmem_cache_free(nbuf_tracking_cache, node);
2959*5113495bSYour Name 	} else {
2960*5113495bSYour Name 		node->p_next = qdf_net_buf_track_free_list;
2961*5113495bSYour Name 		qdf_net_buf_track_free_list = node;
2962*5113495bSYour Name 		qdf_net_buf_track_free_list_count++;
2963*5113495bSYour Name 	}
2964*5113495bSYour Name 	update_max_free();
2965*5113495bSYour Name 	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
2966*5113495bSYour Name }
2967*5113495bSYour Name 
2968*5113495bSYour Name /**
2969*5113495bSYour Name  * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
2970*5113495bSYour Name  *
2971*5113495bSYour Name  * Removes a 'warmup time' characteristic of the freelist.  Prefilling
2972*5113495bSYour Name  * the freelist first makes it performant for the first iperf udp burst
2973*5113495bSYour Name  * as well as steady state.
2974*5113495bSYour Name  *
2975*5113495bSYour Name  * Return: None
2976*5113495bSYour Name  */
qdf_nbuf_track_prefill(void)2977*5113495bSYour Name static void qdf_nbuf_track_prefill(void)
2978*5113495bSYour Name {
2979*5113495bSYour Name 	int i;
2980*5113495bSYour Name 	QDF_NBUF_TRACK *node, *head;
2981*5113495bSYour Name 
2982*5113495bSYour Name 	/* prepopulate the freelist */
2983*5113495bSYour Name 	head = NULL;
2984*5113495bSYour Name 	for (i = 0; i < FREEQ_POOLSIZE; i++) {
2985*5113495bSYour Name 		node = qdf_nbuf_track_alloc();
2986*5113495bSYour Name 		if (!node)
2987*5113495bSYour Name 			continue;
2988*5113495bSYour Name 		node->p_next = head;
2989*5113495bSYour Name 		head = node;
2990*5113495bSYour Name 	}
2991*5113495bSYour Name 	while (head) {
2992*5113495bSYour Name 		node = head->p_next;
2993*5113495bSYour Name 		qdf_nbuf_track_free(head);
2994*5113495bSYour Name 		head = node;
2995*5113495bSYour Name 	}
2996*5113495bSYour Name 
2997*5113495bSYour Name 	/* prefilled buffers should not count as used */
2998*5113495bSYour Name 	qdf_net_buf_track_max_used = 0;
2999*5113495bSYour Name }
3000*5113495bSYour Name 
/**
 * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies
 *
 * This initializes the memory manager for the nbuf tracking cookies.  Because
 * these cookies are all the same size and only used in this feature, we can
 * use a kmem_cache to provide tracking as well as to speed up allocations.
 * To avoid the overhead of allocating and freeing the buffers (including SLUB
 * features) a freelist is prepopulated here.
 *
 * NOTE(review): the kmem_cache_create() result is not checked; if it fails,
 * later kmem_cache_alloc() calls receive a NULL cache — confirm whether the
 * debug feature tolerates this.
 *
 * Return: None
 */
static void qdf_nbuf_track_memory_manager_create(void)
{
	spin_lock_init(&qdf_net_buf_track_free_list_lock);
	nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
						sizeof(QDF_NBUF_TRACK),
						0, 0, NULL);

	qdf_nbuf_track_prefill();
}
3021*5113495bSYour Name 
/**
 * qdf_nbuf_track_memory_manager_destroy() - manager for nbuf tracking cookies
 *
 * Empty the freelist and print out usage statistics when it is no longer
 * needed. Also the kmem_cache should be destroyed here so that it can warn if
 * any nbuf tracking cookies were leaked.
 *
 * Return: None
 */
static void qdf_nbuf_track_memory_manager_destroy(void)
{
	QDF_NBUF_TRACK *node, *tmp;
	unsigned long irq_flag;

	spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
	node = qdf_net_buf_track_free_list;

	/* sanity-check the recorded high-water marks before teardown */
	if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
		qdf_print("%s: unexpectedly large max_used count %d",
			  __func__, qdf_net_buf_track_max_used);

	if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
		qdf_print("%s: %d unused trackers were allocated",
			  __func__,
			  qdf_net_buf_track_max_allocated -
			  qdf_net_buf_track_max_used);

	if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
	    qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
		qdf_print("%s: check freelist shrinking functionality",
			  __func__);

	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
		  "%s: %d residual freelist size",
		  __func__, qdf_net_buf_track_free_list_count);

	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
		  "%s: %d max freelist size observed",
		  __func__, qdf_net_buf_track_max_free);

	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
		  "%s: %d max buffers used observed",
		  __func__, qdf_net_buf_track_max_used);

	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
		  "%s: %d max buffers allocated observed",
		  __func__, qdf_net_buf_track_max_allocated);

	/* drain the freelist back to the slab cache */
	while (node) {
		tmp = node;
		node = node->p_next;
		kmem_cache_free(nbuf_tracking_cache, tmp);
		qdf_net_buf_track_free_list_count--;
	}

	if (qdf_net_buf_track_free_list_count != 0)
		qdf_info("%d unfreed tracking memory lost in freelist",
			 qdf_net_buf_track_free_list_count);

	if (qdf_net_buf_track_used_list_count != 0)
		qdf_info("%d unfreed tracking memory still in use",
			 qdf_net_buf_track_used_list_count);

	spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
	kmem_cache_destroy(nbuf_tracking_cache);
	qdf_net_buf_track_free_list = NULL;
}
3089*5113495bSYour Name 
qdf_net_buf_debug_init(void)3090*5113495bSYour Name void qdf_net_buf_debug_init(void)
3091*5113495bSYour Name {
3092*5113495bSYour Name 	uint32_t i;
3093*5113495bSYour Name 
3094*5113495bSYour Name 	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
3095*5113495bSYour Name 
3096*5113495bSYour Name 	if (is_initial_mem_debug_disabled)
3097*5113495bSYour Name 		return;
3098*5113495bSYour Name 
3099*5113495bSYour Name 	qdf_atomic_set(&qdf_nbuf_history_index, -1);
3100*5113495bSYour Name 
3101*5113495bSYour Name 	qdf_nbuf_map_tracking_init();
3102*5113495bSYour Name 	qdf_nbuf_smmu_map_tracking_init();
3103*5113495bSYour Name 	qdf_nbuf_track_memory_manager_create();
3104*5113495bSYour Name 
3105*5113495bSYour Name 	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
3106*5113495bSYour Name 		gp_qdf_net_buf_track_tbl[i] = NULL;
3107*5113495bSYour Name 		spin_lock_init(&g_qdf_net_buf_track_lock[i]);
3108*5113495bSYour Name 	}
3109*5113495bSYour Name }
3110*5113495bSYour Name qdf_export_symbol(qdf_net_buf_debug_init);
3111*5113495bSYour Name 
/**
 * qdf_net_buf_debug_exit() - tear down the nbuf debug tracking facility
 *
 * Walks every hash bucket, reports any nbufs still being tracked as
 * leaks (including their map/unmap history), frees the tracking nodes,
 * and destroys the map trackers and the tracking memory manager.
 * With CONFIG_HALT_KMEMLEAK, asserts if any leak was found.
 *
 * Return: none
 */
void qdf_net_buf_debug_exit(void)
{
	uint32_t i;
	uint32_t count = 0;
	unsigned long irq_flag;
	QDF_NBUF_TRACK *p_node;
	QDF_NBUF_TRACK *p_prev;

	if (is_initial_mem_debug_disabled)
		return;

	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
		spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
		p_node = gp_qdf_net_buf_track_tbl[i];
		/* every node still in the table at exit is a leaked nbuf */
		while (p_node) {
			p_prev = p_node;
			p_node = p_node->p_next;
			count++;
			qdf_info("SKB buf memory Leak@ Func %s, @Line %d, size %zu, nbuf %pK",
				 p_prev->func_name, p_prev->line_num,
				 p_prev->size, p_prev->net_buf);
			qdf_info("SKB leak map %s, line %d, unmap %s line %d mapped=%d",
				 p_prev->map_func_name,
				 p_prev->map_line_num,
				 p_prev->unmap_func_name,
				 p_prev->unmap_line_num,
				 p_prev->is_nbuf_mapped);
			qdf_nbuf_track_free(p_prev);
		}
		spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
	}

	qdf_nbuf_track_memory_manager_destroy();
	qdf_nbuf_map_tracking_deinit();
	qdf_nbuf_smmu_map_tracking_deinit();

#ifdef CONFIG_HALT_KMEMLEAK
	if (count) {
		qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
		QDF_BUG(0);
	}
#endif
}
qdf_export_symbol(qdf_net_buf_debug_exit);
3156*5113495bSYour Name 
3157*5113495bSYour Name /**
3158*5113495bSYour Name  * qdf_net_buf_debug_hash() - hash network buffer pointer
3159*5113495bSYour Name  * @net_buf: network buffer
3160*5113495bSYour Name  *
3161*5113495bSYour Name  * Return: hash value
3162*5113495bSYour Name  */
qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)3163*5113495bSYour Name static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
3164*5113495bSYour Name {
3165*5113495bSYour Name 	uint32_t i;
3166*5113495bSYour Name 
3167*5113495bSYour Name 	i = (uint32_t) (((uintptr_t) net_buf) >> 4);
3168*5113495bSYour Name 	i += (uint32_t) (((uintptr_t) net_buf) >> 14);
3169*5113495bSYour Name 	i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
3170*5113495bSYour Name 
3171*5113495bSYour Name 	return i;
3172*5113495bSYour Name }
3173*5113495bSYour Name 
/**
 * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
 * @net_buf: network buffer
 *
 * NOTE(review): not locked internally; every caller in this file holds
 * g_qdf_net_buf_track_lock for the bucket net_buf hashes to before calling.
 *
 * Return: If skb is found in hash table then return pointer to network buffer
 *	else return %NULL
 */
static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
{
	uint32_t i;
	QDF_NBUF_TRACK *p_node;

	i = qdf_net_buf_debug_hash(net_buf);
	p_node = gp_qdf_net_buf_track_tbl[i];

	/* linear scan of the bucket's collision chain */
	while (p_node) {
		if (p_node->net_buf == net_buf)
			return p_node;
		p_node = p_node->p_next;
	}

	return NULL;
}
3197*5113495bSYour Name 
void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
				const char *func_name, uint32_t line_num)
{
	uint32_t i;
	unsigned long irq_flag;
	QDF_NBUF_TRACK *p_node;
	QDF_NBUF_TRACK *new_node;

	if (is_initial_mem_debug_disabled)
		return;

	/*
	 * Allocate the tracking node before taking the bucket lock so no
	 * allocation happens while interrupts are disabled.
	 */
	new_node = qdf_nbuf_track_alloc();

	i = qdf_net_buf_debug_hash(net_buf);
	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);

	p_node = qdf_net_buf_debug_look_up(net_buf);

	if (p_node) {
		/* net_buf is already tracked: double allocation detected */
		qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
			  p_node->net_buf, p_node->func_name, p_node->line_num,
			  net_buf, func_name, line_num);
		qdf_nbuf_track_free(new_node);
	} else {
		p_node = new_node;
		if (p_node) {
			/* populate the node and push it at the bucket head */
			p_node->net_buf = net_buf;
			qdf_str_lcopy(p_node->func_name, func_name,
				      QDF_MEM_FUNC_NAME_SIZE);
			p_node->line_num = line_num;
			p_node->is_nbuf_mapped = false;
			p_node->map_line_num = 0;
			p_node->unmap_line_num = 0;
			p_node->map_func_name[0] = '\0';
			p_node->unmap_func_name[0] = '\0';
			p_node->size = size;
			p_node->time = qdf_get_log_timestamp();
			qdf_net_buf_update_smmu_params(p_node);
			qdf_mem_skb_inc(size);
			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
			gp_qdf_net_buf_track_tbl[i] = p_node;
		} else {
			/*
			 * Tracking-node allocation failed; count it so
			 * delete_node can tell an untracked free apart from
			 * a genuine double free.
			 */
			qdf_net_buf_track_fail_count++;
			qdf_print(
				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
				  func_name, line_num, size);
		}
	}

	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
}
qdf_export_symbol(qdf_net_buf_debug_add_node);
3250*5113495bSYour Name 
/**
 * qdf_net_buf_debug_update_node() - refresh the recorded owner of a
 * tracked network buffer
 * @net_buf: network buffer whose tracking node is updated
 * @func_name: new owning function name (basename is stored)
 * @line_num: new owning line number
 */
void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name,
				   uint32_t line_num)
{
	QDF_NBUF_TRACK *tracker;
	unsigned long flags;
	uint32_t bucket;

	if (is_initial_mem_debug_disabled)
		return;

	bucket = qdf_net_buf_debug_hash(net_buf);
	spin_lock_irqsave(&g_qdf_net_buf_track_lock[bucket], flags);

	tracker = qdf_net_buf_debug_look_up(net_buf);
	if (tracker) {
		qdf_str_lcopy(tracker->func_name, kbasename(func_name),
			      QDF_MEM_FUNC_NAME_SIZE);
		tracker->line_num = line_num;
	}

	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[bucket], flags);
}

qdf_export_symbol(qdf_net_buf_debug_update_node);
3276*5113495bSYour Name 
/**
 * qdf_net_buf_debug_update_map_node() - record the caller that mapped a
 * tracked network buffer
 * @net_buf: network buffer
 * @func_name: function performing the map
 * @line_num: line performing the map
 */
void qdf_net_buf_debug_update_map_node(qdf_nbuf_t net_buf,
				       const char *func_name,
				       uint32_t line_num)
{
	QDF_NBUF_TRACK *tracker;
	unsigned long flags;
	uint32_t bucket;

	if (is_initial_mem_debug_disabled)
		return;

	bucket = qdf_net_buf_debug_hash(net_buf);
	spin_lock_irqsave(&g_qdf_net_buf_track_lock[bucket], flags);

	tracker = qdf_net_buf_debug_look_up(net_buf);
	if (tracker) {
		qdf_str_lcopy(tracker->map_func_name, func_name,
			      QDF_MEM_FUNC_NAME_SIZE);
		tracker->map_line_num = line_num;
		tracker->is_nbuf_mapped = true;
	}

	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[bucket], flags);
}
3301*5113495bSYour Name 
#ifdef NBUF_SMMU_MAP_UNMAP_DEBUG
/**
 * qdf_net_buf_debug_update_smmu_map_node() - record an SMMU map event on a
 * tracked network buffer
 * @nbuf: network buffer
 * @iova: IO virtual address recorded for the mapping
 * @pa: physical address recorded for the mapping
 * @func: function performing the SMMU map
 * @line: line performing the SMMU map
 */
void qdf_net_buf_debug_update_smmu_map_node(qdf_nbuf_t nbuf,
					    unsigned long iova,
					    unsigned long pa,
					    const char *func,
					    uint32_t line)
{
	QDF_NBUF_TRACK *tracker;
	unsigned long flags;
	uint32_t bucket;

	if (is_initial_mem_debug_disabled)
		return;

	bucket = qdf_net_buf_debug_hash(nbuf);
	spin_lock_irqsave(&g_qdf_net_buf_track_lock[bucket], flags);

	tracker = qdf_net_buf_debug_look_up(nbuf);
	if (tracker) {
		/* remember who mapped it and at which addresses */
		qdf_str_lcopy(tracker->smmu_map_func_name, func,
			      QDF_MEM_FUNC_NAME_SIZE);
		tracker->smmu_map_line_num = line;
		tracker->is_nbuf_smmu_mapped = true;
		tracker->smmu_map_iova_addr = iova;
		tracker->smmu_map_pa_addr = pa;
	}

	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[bucket], flags);
}

/**
 * qdf_net_buf_debug_update_smmu_unmap_node() - record an SMMU unmap event on
 * a tracked network buffer
 * @nbuf: network buffer
 * @iova: IO virtual address recorded for the unmapping
 * @pa: physical address recorded for the unmapping
 * @func: function performing the SMMU unmap
 * @line: line performing the SMMU unmap
 */
void qdf_net_buf_debug_update_smmu_unmap_node(qdf_nbuf_t nbuf,
					      unsigned long iova,
					      unsigned long pa,
					      const char *func,
					      uint32_t line)
{
	QDF_NBUF_TRACK *tracker;
	unsigned long flags;
	uint32_t bucket;

	if (is_initial_mem_debug_disabled)
		return;

	bucket = qdf_net_buf_debug_hash(nbuf);
	spin_lock_irqsave(&g_qdf_net_buf_track_lock[bucket], flags);

	tracker = qdf_net_buf_debug_look_up(nbuf);
	if (tracker) {
		/* remember who unmapped it and clear the mapped flag */
		qdf_str_lcopy(tracker->smmu_unmap_func_name, func,
			      QDF_MEM_FUNC_NAME_SIZE);
		tracker->smmu_unmap_line_num = line;
		tracker->is_nbuf_smmu_mapped = false;
		tracker->smmu_unmap_iova_addr = iova;
		tracker->smmu_unmap_pa_addr = pa;
	}

	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[bucket], flags);
}
#endif
3361*5113495bSYour Name 
/**
 * qdf_net_buf_debug_update_unmap_node() - record the caller that unmapped a
 * tracked network buffer
 * @net_buf: network buffer
 * @func_name: function performing the unmap
 * @line_num: line performing the unmap
 */
void qdf_net_buf_debug_update_unmap_node(qdf_nbuf_t net_buf,
					 const char *func_name,
					 uint32_t line_num)
{
	QDF_NBUF_TRACK *tracker;
	unsigned long flags;
	uint32_t bucket;

	if (is_initial_mem_debug_disabled)
		return;

	bucket = qdf_net_buf_debug_hash(net_buf);
	spin_lock_irqsave(&g_qdf_net_buf_track_lock[bucket], flags);

	tracker = qdf_net_buf_debug_look_up(net_buf);
	if (tracker) {
		qdf_str_lcopy(tracker->unmap_func_name, func_name,
			      QDF_MEM_FUNC_NAME_SIZE);
		tracker->unmap_line_num = line_num;
		tracker->is_nbuf_mapped = false;
	}

	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[bucket], flags);
}
3386*5113495bSYour Name 
void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
{
	uint32_t i;
	QDF_NBUF_TRACK *p_head;
	/* stays NULL when net_buf was never tracked (see report below) */
	QDF_NBUF_TRACK *p_node = NULL;
	unsigned long irq_flag;
	QDF_NBUF_TRACK *p_prev;

	if (is_initial_mem_debug_disabled)
		return;

	i = qdf_net_buf_debug_hash(net_buf);
	spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);

	p_head = gp_qdf_net_buf_track_tbl[i];

	/* Unallocated SKB */
	if (!p_head)
		goto done;

	p_node = p_head;
	/* Found at head of the table */
	if (p_head->net_buf == net_buf) {
		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
		goto done;
	}

	/* Search in collision list */
	while (p_node) {
		p_prev = p_node;
		p_node = p_node->p_next;
		if ((p_node) && (p_node->net_buf == net_buf)) {
			/* unlink the matching node; p_node keeps pointing
			 * at it for the accounting below
			 */
			p_prev->p_next = p_node->p_next;
			break;
		}
	}

done:
	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);

	/* accounting and node free are done outside the spinlock */
	if (p_node) {
		qdf_mem_skb_dec(p_node->size);
		qdf_nbuf_track_free(p_node);
	} else {
		/*
		 * Not found: either tracking allocation failed earlier
		 * (best effort, just report) or this is a double free.
		 */
		if (qdf_net_buf_track_fail_count) {
			qdf_print("Untracked net_buf free: %pK with tracking failures count: %u",
				  net_buf, qdf_net_buf_track_fail_count);
		} else
			QDF_MEMDEBUG_PANIC("Unallocated buffer ! Double free of net_buf %pK ?",
					   net_buf);
	}
}
qdf_export_symbol(qdf_net_buf_debug_delete_node);
3440*5113495bSYour Name 
/**
 * qdf_net_buf_debug_acquire_skb() - add an externally created skb (and every
 * buffer on its frag_list) to the debug tracking table
 * @net_buf: network buffer
 * @func_name: caller's function name, stored as the owner
 * @line_num: caller's line number, stored as the owner
 */
void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
				   const char *func_name, uint32_t line_num)
{
	qdf_nbuf_t frag = qdf_nbuf_get_ext_list(net_buf);
	qdf_nbuf_t next_frag;

	if (is_initial_mem_debug_disabled)
		return;

	/*
	 * A jumbo packet carries extra buffers on its frag_list; register
	 * each of them before the parent buffer itself.
	 */
	for (; frag; frag = next_frag) {
		next_frag = qdf_nbuf_queue_next(frag);
		qdf_net_buf_debug_add_node(frag, 0, func_name, line_num);
	}

	qdf_net_buf_debug_add_node(net_buf, 0, func_name, line_num);
}
qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
3463*5113495bSYour Name 
/**
 * qdf_net_buf_debug_release_skb() - remove an skb (and every buffer on its
 * frag_list) from the debug tracking table when the last reference drops
 * @net_buf: network buffer
 */
void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
{
	qdf_nbuf_t frag;
	qdf_nbuf_t next_frag;

	if (is_initial_mem_debug_disabled)
		return;

	/*
	 * A jumbo packet carries extra buffers on its frag_list; drop the
	 * tracking entry of each one that is not still shared.
	 */
	for (frag = qdf_nbuf_get_ext_list(net_buf); frag; frag = next_frag) {
		next_frag = qdf_nbuf_queue_next(frag);

		/* still referenced elsewhere: keep its tracking entry */
		if (qdf_nbuf_get_users(frag) > 1)
			continue;

		qdf_net_buf_debug_delete_node(frag);
	}

	if (qdf_nbuf_get_users(net_buf) > 1)
		return;

	qdf_net_buf_debug_delete_node(net_buf);
}
qdf_export_symbol(qdf_net_buf_debug_release_skb);
3496*5113495bSYour Name 
qdf_nbuf_alloc_debug(qdf_device_t osdev,qdf_size_t size,int reserve,int align,int prio,const char * func,uint32_t line)3497*5113495bSYour Name qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
3498*5113495bSYour Name 				int reserve, int align, int prio,
3499*5113495bSYour Name 				const char *func, uint32_t line)
3500*5113495bSYour Name {
3501*5113495bSYour Name 	qdf_nbuf_t nbuf;
3502*5113495bSYour Name 
3503*5113495bSYour Name 	if (is_initial_mem_debug_disabled)
3504*5113495bSYour Name 		return __qdf_nbuf_alloc(osdev, size,
3505*5113495bSYour Name 					reserve, align,
3506*5113495bSYour Name 					prio, func, line);
3507*5113495bSYour Name 
3508*5113495bSYour Name 	nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line);
3509*5113495bSYour Name 
3510*5113495bSYour Name 	/* Store SKB in internal QDF tracking table */
3511*5113495bSYour Name 	if (qdf_likely(nbuf)) {
3512*5113495bSYour Name 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
3513*5113495bSYour Name 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
3514*5113495bSYour Name 	} else {
3515*5113495bSYour Name 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
3516*5113495bSYour Name 	}
3517*5113495bSYour Name 
3518*5113495bSYour Name 	return nbuf;
3519*5113495bSYour Name }
3520*5113495bSYour Name qdf_export_symbol(qdf_nbuf_alloc_debug);
3521*5113495bSYour Name 
qdf_nbuf_frag_alloc_debug(qdf_device_t osdev,qdf_size_t size,int reserve,int align,int prio,const char * func,uint32_t line)3522*5113495bSYour Name qdf_nbuf_t qdf_nbuf_frag_alloc_debug(qdf_device_t osdev, qdf_size_t size,
3523*5113495bSYour Name 				     int reserve, int align, int prio,
3524*5113495bSYour Name 				     const char *func, uint32_t line)
3525*5113495bSYour Name {
3526*5113495bSYour Name 	qdf_nbuf_t nbuf;
3527*5113495bSYour Name 
3528*5113495bSYour Name 	if (is_initial_mem_debug_disabled)
3529*5113495bSYour Name 		return __qdf_nbuf_frag_alloc(osdev, size,
3530*5113495bSYour Name 					reserve, align,
3531*5113495bSYour Name 					prio, func, line);
3532*5113495bSYour Name 
3533*5113495bSYour Name 	nbuf = __qdf_nbuf_frag_alloc(osdev, size, reserve, align, prio,
3534*5113495bSYour Name 				     func, line);
3535*5113495bSYour Name 
3536*5113495bSYour Name 	/* Store SKB in internal QDF tracking table */
3537*5113495bSYour Name 	if (qdf_likely(nbuf)) {
3538*5113495bSYour Name 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
3539*5113495bSYour Name 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
3540*5113495bSYour Name 	} else {
3541*5113495bSYour Name 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
3542*5113495bSYour Name 	}
3543*5113495bSYour Name 
3544*5113495bSYour Name 	return nbuf;
3545*5113495bSYour Name }
3546*5113495bSYour Name 
3547*5113495bSYour Name qdf_export_symbol(qdf_nbuf_frag_alloc_debug);
3548*5113495bSYour Name 
qdf_nbuf_alloc_no_recycler_debug(size_t size,int reserve,int align,const char * func,uint32_t line)3549*5113495bSYour Name qdf_nbuf_t qdf_nbuf_alloc_no_recycler_debug(size_t size, int reserve, int align,
3550*5113495bSYour Name 					    const char *func, uint32_t line)
3551*5113495bSYour Name {
3552*5113495bSYour Name 	qdf_nbuf_t nbuf;
3553*5113495bSYour Name 
3554*5113495bSYour Name 	if (is_initial_mem_debug_disabled)
3555*5113495bSYour Name 		return __qdf_nbuf_alloc_no_recycler(size, reserve, align, func,
3556*5113495bSYour Name 						    line);
3557*5113495bSYour Name 
3558*5113495bSYour Name 	nbuf = __qdf_nbuf_alloc_no_recycler(size, reserve, align, func, line);
3559*5113495bSYour Name 
3560*5113495bSYour Name 	/* Store SKB in internal QDF tracking table */
3561*5113495bSYour Name 	if (qdf_likely(nbuf)) {
3562*5113495bSYour Name 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
3563*5113495bSYour Name 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
3564*5113495bSYour Name 	} else {
3565*5113495bSYour Name 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
3566*5113495bSYour Name 	}
3567*5113495bSYour Name 
3568*5113495bSYour Name 	return nbuf;
3569*5113495bSYour Name }
3570*5113495bSYour Name 
3571*5113495bSYour Name qdf_export_symbol(qdf_nbuf_alloc_no_recycler_debug);
3572*5113495bSYour Name 
void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line)
{
	qdf_nbuf_t ext_list;
	qdf_frag_t p_frag;
	uint32_t num_nr_frags;
	uint32_t idx = 0;

	if (qdf_unlikely(!nbuf))
		return;

	if (is_initial_mem_debug_disabled)
		goto free_buf;

	/* still shared: only drop this reference, keep tracking entries */
	if (qdf_nbuf_get_users(nbuf) > 1)
		goto free_buf;

	/* Remove SKB from internal QDF tracking table */
	qdf_nbuf_panic_on_free_if_smmu_mapped(nbuf, func, line);
	qdf_nbuf_panic_on_free_if_mapped(nbuf, func, line);
	qdf_net_buf_debug_delete_node(nbuf);
	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_FREE);

	/* Take care to delete the debug entries for frags */
	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);

	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);

	while (idx < num_nr_frags) {
		p_frag = qdf_nbuf_get_frag_addr(nbuf, idx);
		if (qdf_likely(p_frag))
			qdf_frag_debug_refcount_dec(p_frag, func, line);
		idx++;
	}

	/*
	 * Take care to update the debug entries for frag_list and also
	 * for the frags attached to frag_list
	 */
	ext_list = qdf_nbuf_get_ext_list(nbuf);
	while (ext_list) {
		/* only untrack frag_list members whose last ref this is */
		if (qdf_nbuf_get_users(ext_list) == 1) {
			qdf_nbuf_panic_on_free_if_smmu_mapped(ext_list, func,
							      line);
			qdf_nbuf_panic_on_free_if_mapped(ext_list, func, line);
			idx = 0;
			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
			while (idx < num_nr_frags) {
				p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
				if (qdf_likely(p_frag))
					qdf_frag_debug_refcount_dec(p_frag,
								    func, line);
				idx++;
			}
			qdf_net_buf_debug_delete_node(ext_list);
		}

		ext_list = qdf_nbuf_queue_next(ext_list);
	}

free_buf:
	__qdf_nbuf_free(nbuf);
}
qdf_export_symbol(qdf_nbuf_free_debug);
3637*5113495bSYour Name 
struct sk_buff *__qdf_nbuf_alloc_simple(qdf_device_t osdev, size_t size,
					const char *func, uint32_t line)
{
	struct sk_buff *skb;
	int flags = GFP_KERNEL;

	/* atomic context: sleeping allocation is not allowed */
	if (in_interrupt() || irqs_disabled() || in_atomic()) {
		flags = GFP_ATOMIC;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
		/*
		 * Observed that kcompactd burns out CPU to make order-3 page.
		 *__netdev_alloc_skb has 4k page fallback option just in case of
		 * failing high order page allocation so we don't need to be
		 * hard. Make kcompactd rest in piece.
		 */
		flags = flags & ~__GFP_KSWAPD_RECLAIM;
#endif
	}

	skb = __netdev_alloc_skb(NULL, size, flags);


	if (qdf_likely(is_initial_mem_debug_disabled)) {
		/* debug off: only maintain the global nbuf counter */
		if (qdf_likely(skb))
			qdf_nbuf_count_inc(skb);
	} else {
		if (qdf_likely(skb)) {
			qdf_nbuf_count_inc(skb);
			qdf_net_buf_debug_add_node(skb, size, func, line);
			qdf_nbuf_history_add(skb, func, line, QDF_NBUF_ALLOC);
		} else {
			/* record the failure in the nbuf history as well */
			qdf_nbuf_history_add(skb, func, line, QDF_NBUF_ALLOC_FAILURE);
		}
	}


	return skb;
}

qdf_export_symbol(__qdf_nbuf_alloc_simple);
3678*5113495bSYour Name 
/**
 * qdf_nbuf_free_debug_simple() - free an nbuf, routing through the debug
 * free path only when mem debug is active
 * @nbuf: network buffer to free (NULL is a no-op)
 * @func: caller's function name, forwarded to the debug free path
 * @line: caller's line number, forwarded to the debug free path
 */
void qdf_nbuf_free_debug_simple(qdf_nbuf_t nbuf, const char *func,
				uint32_t line)
{
	if (qdf_unlikely(!nbuf))
		return;

	if (is_initial_mem_debug_disabled)
		dev_kfree_skb_any(nbuf);
	else
		qdf_nbuf_free_debug(nbuf, func, line);
}

qdf_export_symbol(qdf_nbuf_free_debug_simple);
3692*5113495bSYour Name 
qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
{
	uint32_t num_nr_frags;
	uint32_t idx = 0;
	qdf_nbuf_t ext_list;
	qdf_frag_t p_frag;

	qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf);

	if (is_initial_mem_debug_disabled)
		return cloned_buf;

	if (qdf_unlikely(!cloned_buf))
		return NULL;

	/* Take care to update the debug entries for frags */
	/* a clone shares the original's frags, so bump each refcount */
	num_nr_frags = qdf_nbuf_get_nr_frags(cloned_buf);

	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);

	while (idx < num_nr_frags) {
		p_frag = qdf_nbuf_get_frag_addr(cloned_buf, idx);
		if (qdf_likely(p_frag))
			qdf_frag_debug_refcount_inc(p_frag, func, line);
		idx++;
	}

	/* Take care to update debug entries for frags attached to frag_list */
	ext_list = qdf_nbuf_get_ext_list(cloned_buf);
	while (ext_list) {
		idx = 0;
		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);

		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);

		while (idx < num_nr_frags) {
			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
			if (qdf_likely(p_frag))
				qdf_frag_debug_refcount_inc(p_frag, func, line);
			idx++;
		}
		ext_list = qdf_nbuf_queue_next(ext_list);
	}

	/* Store SKB in internal QDF tracking table */
	qdf_net_buf_debug_add_node(cloned_buf, 0, func, line);
	qdf_nbuf_history_add(cloned_buf, func, line, QDF_NBUF_ALLOC_CLONE);

	return cloned_buf;
}
qdf_export_symbol(qdf_nbuf_clone_debug);
3744*5113495bSYour Name 
3745*5113495bSYour Name qdf_nbuf_t
qdf_nbuf_page_frag_alloc_debug(qdf_device_t osdev,qdf_size_t size,int reserve,int align,__qdf_frag_cache_t * pf_cache,const char * func,uint32_t line)3746*5113495bSYour Name qdf_nbuf_page_frag_alloc_debug(qdf_device_t osdev, qdf_size_t size, int reserve,
3747*5113495bSYour Name 			       int align, __qdf_frag_cache_t *pf_cache,
3748*5113495bSYour Name 			       const char *func, uint32_t line)
3749*5113495bSYour Name {
3750*5113495bSYour Name 	qdf_nbuf_t nbuf;
3751*5113495bSYour Name 
3752*5113495bSYour Name 	if (is_initial_mem_debug_disabled)
3753*5113495bSYour Name 		return __qdf_nbuf_page_frag_alloc(osdev, size, reserve, align,
3754*5113495bSYour Name 						  pf_cache, func, line);
3755*5113495bSYour Name 
3756*5113495bSYour Name 	nbuf = __qdf_nbuf_page_frag_alloc(osdev, size, reserve, align,
3757*5113495bSYour Name 					  pf_cache, func, line);
3758*5113495bSYour Name 
3759*5113495bSYour Name 	/* Store SKB in internal QDF tracking table */
3760*5113495bSYour Name 	if (qdf_likely(nbuf)) {
3761*5113495bSYour Name 		qdf_net_buf_debug_add_node(nbuf, size, func, line);
3762*5113495bSYour Name 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
3763*5113495bSYour Name 	} else {
3764*5113495bSYour Name 		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
3765*5113495bSYour Name 	}
3766*5113495bSYour Name 
3767*5113495bSYour Name 	return nbuf;
3768*5113495bSYour Name }
3769*5113495bSYour Name 
3770*5113495bSYour Name qdf_export_symbol(qdf_nbuf_page_frag_alloc_debug);
3771*5113495bSYour Name 
qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
{
	qdf_nbuf_t copied_buf;

	copied_buf = __qdf_nbuf_copy(buf);

	/* No tracking when memory debug is disabled */
	if (is_initial_mem_debug_disabled)
		return copied_buf;

	if (qdf_unlikely(!copied_buf))
		return NULL;

	/* Register the copy with the internal QDF tracking table */
	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
	qdf_nbuf_history_add(copied_buf, func, line, QDF_NBUF_ALLOC_COPY);

	return copied_buf;
}
3788*5113495bSYour Name qdf_export_symbol(qdf_nbuf_copy_debug);
3789*5113495bSYour Name 
3790*5113495bSYour Name qdf_nbuf_t
qdf_nbuf_copy_expand_debug(qdf_nbuf_t buf,int headroom,int tailroom,const char * func,uint32_t line)3791*5113495bSYour Name qdf_nbuf_copy_expand_debug(qdf_nbuf_t buf, int headroom, int tailroom,
3792*5113495bSYour Name 			   const char *func, uint32_t line)
3793*5113495bSYour Name {
3794*5113495bSYour Name 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy_expand(buf, headroom, tailroom);
3795*5113495bSYour Name 
3796*5113495bSYour Name 	if (qdf_unlikely(!copied_buf))
3797*5113495bSYour Name 		return NULL;
3798*5113495bSYour Name 
3799*5113495bSYour Name 	if (is_initial_mem_debug_disabled)
3800*5113495bSYour Name 		return copied_buf;
3801*5113495bSYour Name 
3802*5113495bSYour Name 	/* Store SKB in internal QDF tracking table */
3803*5113495bSYour Name 	qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
3804*5113495bSYour Name 	qdf_nbuf_history_add(copied_buf, func, line,
3805*5113495bSYour Name 			     QDF_NBUF_ALLOC_COPY_EXPAND);
3806*5113495bSYour Name 
3807*5113495bSYour Name 	return copied_buf;
3808*5113495bSYour Name }
3809*5113495bSYour Name 
3810*5113495bSYour Name qdf_export_symbol(qdf_nbuf_copy_expand_debug);
3811*5113495bSYour Name 
qdf_nbuf_t
qdf_nbuf_unshare_debug(qdf_nbuf_t buf, const char *func_name,
		       uint32_t line_num)
{
	qdf_nbuf_t unshared_buf;
	qdf_frag_t p_frag;
	uint32_t num_nr_frags;
	uint32_t idx = 0;
	qdf_nbuf_t ext_list, next;

	/* Without memory debug, defer directly to the OS-shim unshare */
	if (is_initial_mem_debug_disabled)
		return __qdf_nbuf_unshare(buf);

	/* Not a shared buffer, nothing to do */
	if (!qdf_nbuf_is_cloned(buf))
		return buf;

	/*
	 * Multiple users: this buffer lives on after the unshare, so
	 * keep its debug entries intact and only track the result.
	 */
	if (qdf_nbuf_get_users(buf) > 1)
		goto unshare_buf;

	/*
	 * Single user: drop the debug refcounts for this buffer's frags
	 * and remove its tracking node before unsharing.
	 * NOTE(review): this assumes __qdf_nbuf_unshare releases the
	 * original buffer in the single-user cloned case — confirm
	 * against the OS-shim implementation.
	 */
	num_nr_frags = qdf_nbuf_get_nr_frags(buf);

	while (idx < num_nr_frags) {
		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
		if (qdf_likely(p_frag))
			qdf_frag_debug_refcount_dec(p_frag, func_name,
						    line_num);
		idx++;
	}

	qdf_net_buf_debug_delete_node(buf);

	 /* Take care of jumbo packet connected using frag_list and frags */
	ext_list = qdf_nbuf_get_ext_list(buf);
	while (ext_list) {
		idx = 0;
		next = qdf_nbuf_queue_next(ext_list);
		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);

		/* Shared frag_list members keep their debug entries */
		if (qdf_nbuf_get_users(ext_list) > 1) {
			ext_list = next;
			continue;
		}

		while (idx < num_nr_frags) {
			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
			if (qdf_likely(p_frag))
				qdf_frag_debug_refcount_dec(p_frag, func_name,
							    line_num);
			idx++;
		}

		qdf_net_buf_debug_delete_node(ext_list);
		ext_list = next;
	}

unshare_buf:
	unshared_buf = __qdf_nbuf_unshare(buf);

	/* Track the (possibly newly allocated) unshared buffer */
	if (qdf_likely(unshared_buf))
		qdf_net_buf_debug_add_node(unshared_buf, 0, func_name,
					   line_num);

	return unshared_buf;
}
3878*5113495bSYour Name 
3879*5113495bSYour Name qdf_export_symbol(qdf_nbuf_unshare_debug);
3880*5113495bSYour Name 
3881*5113495bSYour Name void
qdf_nbuf_dev_kfree_list_debug(__qdf_nbuf_queue_head_t * nbuf_queue_head,const char * func,uint32_t line)3882*5113495bSYour Name qdf_nbuf_dev_kfree_list_debug(__qdf_nbuf_queue_head_t *nbuf_queue_head,
3883*5113495bSYour Name 			      const char *func, uint32_t line)
3884*5113495bSYour Name {
3885*5113495bSYour Name 	qdf_nbuf_t  buf;
3886*5113495bSYour Name 
3887*5113495bSYour Name 	if (qdf_nbuf_queue_empty(nbuf_queue_head))
3888*5113495bSYour Name 		return;
3889*5113495bSYour Name 
3890*5113495bSYour Name 	if (is_initial_mem_debug_disabled)
3891*5113495bSYour Name 		return __qdf_nbuf_dev_kfree_list(nbuf_queue_head);
3892*5113495bSYour Name 
3893*5113495bSYour Name 	while ((buf = qdf_nbuf_queue_head_dequeue(nbuf_queue_head)) != NULL)
3894*5113495bSYour Name 		qdf_nbuf_free_debug(buf, func, line);
3895*5113495bSYour Name }
3896*5113495bSYour Name 
3897*5113495bSYour Name qdf_export_symbol(qdf_nbuf_dev_kfree_list_debug);
3898*5113495bSYour Name #endif /* NBUF_MEMORY_DEBUG */
3899*5113495bSYour Name 
3900*5113495bSYour Name #if defined(QCA_DP_NBUF_FAST_PPEDS)
3901*5113495bSYour Name #if defined(NBUF_MEMORY_DEBUG)
__qdf_nbuf_alloc_ppe_ds(qdf_device_t osdev,size_t size,const char * func,uint32_t line)3902*5113495bSYour Name struct sk_buff *__qdf_nbuf_alloc_ppe_ds(qdf_device_t osdev, size_t size,
3903*5113495bSYour Name 					const char *func, uint32_t line)
3904*5113495bSYour Name {
3905*5113495bSYour Name 	struct sk_buff *skb;
3906*5113495bSYour Name 	int flags = GFP_KERNEL;
3907*5113495bSYour Name 
3908*5113495bSYour Name 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
3909*5113495bSYour Name 		flags = GFP_ATOMIC;
3910*5113495bSYour Name #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
3911*5113495bSYour Name 		/*
3912*5113495bSYour Name 		 * Observed that kcompactd burns out CPU to make order-3
3913*5113495bSYour Name 		 * page.__netdev_alloc_skb has 4k page fallback option
3914*5113495bSYour Name 		 * just in case of
3915*5113495bSYour Name 		 * failing high order page allocation so we don't need
3916*5113495bSYour Name 		 * to be hard. Make kcompactd rest in piece.
3917*5113495bSYour Name 		 */
3918*5113495bSYour Name 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
3919*5113495bSYour Name #endif
3920*5113495bSYour Name 	}
3921*5113495bSYour Name 	skb = __netdev_alloc_skb_no_skb_reset(NULL, size, flags);
3922*5113495bSYour Name 	if (qdf_likely(is_initial_mem_debug_disabled)) {
3923*5113495bSYour Name 		if (qdf_likely(skb))
3924*5113495bSYour Name 			qdf_nbuf_count_inc(skb);
3925*5113495bSYour Name 	} else {
3926*5113495bSYour Name 		if (qdf_likely(skb)) {
3927*5113495bSYour Name 			qdf_nbuf_count_inc(skb);
3928*5113495bSYour Name 			qdf_net_buf_debug_add_node(skb, size, func, line);
3929*5113495bSYour Name 			qdf_nbuf_history_add(skb, func, line,
3930*5113495bSYour Name 					     QDF_NBUF_ALLOC);
3931*5113495bSYour Name 		} else {
3932*5113495bSYour Name 			qdf_nbuf_history_add(skb, func, line,
3933*5113495bSYour Name 					     QDF_NBUF_ALLOC_FAILURE);
3934*5113495bSYour Name 		}
3935*5113495bSYour Name 	}
3936*5113495bSYour Name 	return skb;
3937*5113495bSYour Name }
3938*5113495bSYour Name #else
__qdf_nbuf_alloc_ppe_ds(qdf_device_t osdev,size_t size,const char * func,uint32_t line)3939*5113495bSYour Name struct sk_buff *__qdf_nbuf_alloc_ppe_ds(qdf_device_t osdev, size_t size,
3940*5113495bSYour Name 					const char *func, uint32_t line)
3941*5113495bSYour Name {
3942*5113495bSYour Name 	struct sk_buff *skb;
3943*5113495bSYour Name 	int flags = GFP_KERNEL;
3944*5113495bSYour Name 
3945*5113495bSYour Name 	if (in_interrupt() || irqs_disabled() || in_atomic()) {
3946*5113495bSYour Name 		flags = GFP_ATOMIC;
3947*5113495bSYour Name #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
3948*5113495bSYour Name 		/*
3949*5113495bSYour Name 		 * Observed that kcompactd burns out CPU to make order-3
3950*5113495bSYour Name 		 * page.__netdev_alloc_skb has 4k page fallback option
3951*5113495bSYour Name 		 * just in case of
3952*5113495bSYour Name 		 * failing high order page allocation so we don't need
3953*5113495bSYour Name 		 * to be hard. Make kcompactd rest in piece.
3954*5113495bSYour Name 		 */
3955*5113495bSYour Name 		flags = flags & ~__GFP_KSWAPD_RECLAIM;
3956*5113495bSYour Name #endif
3957*5113495bSYour Name 	}
3958*5113495bSYour Name 	skb = __netdev_alloc_skb_no_skb_reset(NULL, size, flags);
3959*5113495bSYour Name 	if (qdf_likely(skb))
3960*5113495bSYour Name 		qdf_nbuf_count_inc(skb);
3961*5113495bSYour Name 
3962*5113495bSYour Name 	return skb;
3963*5113495bSYour Name }
3964*5113495bSYour Name #endif
3965*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_alloc_ppe_ds);
3966*5113495bSYour Name #endif
3967*5113495bSYour Name 
3968*5113495bSYour Name #if defined(FEATURE_TSO)
3969*5113495bSYour Name 
/**
 * struct qdf_tso_cmn_seg_info_t - TSO common info structure
 *
 * @ethproto: ethernet type of the msdu (network byte order)
 * @ip_tcp_hdr_len: ip + tcp length for the msdu
 * @l2_len: L2 length for the msdu
 * @eit_hdr: pointer to EIT (ethernet + IP + TCP) header
 * @eit_hdr_len: EIT header length for the msdu
 * @eit_hdr_dma_map_addr: dma addr for EIT header
 * @tcphdr: pointer to tcp header
 * @ipv4_csum_en: ipv4 checksum enable
 * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
 * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
 * @ip_id: IP id (host byte order; incremented per segment)
 * @tcp_seq_num: TCP sequence number (host byte order)
 *
 * This structure holds the TSO common info that is common
 * across all the TCP segments of the jumbo packet.
 */
struct qdf_tso_cmn_seg_info_t {
	uint16_t ethproto;
	uint16_t ip_tcp_hdr_len;
	uint16_t l2_len;
	uint8_t *eit_hdr;
	uint32_t eit_hdr_len;
	qdf_dma_addr_t eit_hdr_dma_map_addr;
	struct tcphdr *tcphdr;
	uint16_t ipv4_csum_en;
	uint16_t tcp_ipv4_csum_en;
	uint16_t tcp_ipv6_csum_en;
	uint16_t ip_id;
	uint32_t tcp_seq_num;
};
4003*5113495bSYour Name 
4004*5113495bSYour Name /**
4005*5113495bSYour Name  * qdf_nbuf_adj_tso_frag() - adjustment for buffer address of tso fragment
4006*5113495bSYour Name  * @skb: network buffer
4007*5113495bSYour Name  *
4008*5113495bSYour Name  * Return: byte offset length of 8 bytes aligned.
4009*5113495bSYour Name  */
4010*5113495bSYour Name #ifdef FIX_TXDMA_LIMITATION
static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
{
	uint32_t eit_hdr_len;
	uint8_t *eit_hdr;
	uint8_t byte_8_align_offset;

	/* EIT header = ethernet + IP + TCP headers at the head of the skb */
	eit_hdr = skb->data;
	eit_hdr_len = (skb_transport_header(skb)
		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
	/* Distance of the EIT header's end address from 8-byte alignment */
	byte_8_align_offset = ((unsigned long)(eit_hdr) + eit_hdr_len) & 0x7L;
	if (qdf_unlikely(byte_8_align_offset)) {
		TSO_DEBUG("%pK,Len %d %d",
			  eit_hdr, eit_hdr_len, byte_8_align_offset);
		if (unlikely(skb_headroom(skb) < byte_8_align_offset)) {
			/* Not enough headroom to shift; leave skb untouched */
			TSO_DEBUG("[%d]Insufficient headroom,[%pK],[%pK],[%d]",
				  __LINE__, skb->head, skb->data,
				 byte_8_align_offset);
			return 0;
		}
		/*
		 * Shift the EIT header towards the headroom so its end
		 * lands on an 8-byte boundary. The push grows skb->len and
		 * moves skb->data back; the explicit len decrement below
		 * restores the original length, so the net effect is only
		 * a data-pointer shift. NOTE(review): relies on
		 * qdf_nbuf_push_head behaving like skb_push — confirm.
		 */
		qdf_nbuf_push_head(skb, byte_8_align_offset);
		qdf_mem_move(skb->data,
			     skb->data + byte_8_align_offset,
			     eit_hdr_len);
		skb->len -= byte_8_align_offset;
		/* Keep the header offsets pointing at the moved headers */
		skb->mac_header -= byte_8_align_offset;
		skb->network_header -= byte_8_align_offset;
		skb->transport_header -= byte_8_align_offset;
	}
	return byte_8_align_offset;
}
4041*5113495bSYour Name #else
static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
{
	/* No TXDMA alignment limitation: never shift the header */
	return 0;
}
4046*5113495bSYour Name #endif
4047*5113495bSYour Name 
4048*5113495bSYour Name #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
/**
 * qdf_record_nbuf_nbytes() - record nbuf bytes for DMA-mapping mem stats
 * @nbytes: number of bytes mapped or unmapped
 * @dir: DMA direction of the (un)mapping
 * @is_mapped: true when bytes were mapped, false when unmapped
 *
 * Thin wrapper over __qdf_record_nbuf_nbytes for the sysfs mem-stats build.
 */
void qdf_record_nbuf_nbytes(
	uint32_t nbytes, qdf_dma_dir_t dir, bool is_mapped)
{
	__qdf_record_nbuf_nbytes(nbytes, dir, is_mapped);
}
4054*5113495bSYour Name 
4055*5113495bSYour Name qdf_export_symbol(qdf_record_nbuf_nbytes);
4056*5113495bSYour Name 
4057*5113495bSYour Name #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
4058*5113495bSYour Name 
4059*5113495bSYour Name /**
4060*5113495bSYour Name  * qdf_nbuf_tso_map_frag() - Map TSO segment
4061*5113495bSYour Name  * @osdev: qdf device handle
4062*5113495bSYour Name  * @tso_frag_vaddr: addr of tso fragment
4063*5113495bSYour Name  * @nbytes: number of bytes
4064*5113495bSYour Name  * @dir: direction
4065*5113495bSYour Name  *
4066*5113495bSYour Name  * Map TSO segment and for MCL record the amount of memory mapped
4067*5113495bSYour Name  *
4068*5113495bSYour Name  * Return: DMA address of mapped TSO fragment in success and
4069*5113495bSYour Name  * NULL in case of DMA mapping failure
4070*5113495bSYour Name  */
qdf_nbuf_tso_map_frag(qdf_device_t osdev,void * tso_frag_vaddr,uint32_t nbytes,qdf_dma_dir_t dir)4071*5113495bSYour Name static inline qdf_dma_addr_t qdf_nbuf_tso_map_frag(
4072*5113495bSYour Name 	qdf_device_t osdev, void *tso_frag_vaddr,
4073*5113495bSYour Name 	uint32_t nbytes, qdf_dma_dir_t dir)
4074*5113495bSYour Name {
4075*5113495bSYour Name 	qdf_dma_addr_t tso_frag_paddr = 0;
4076*5113495bSYour Name 
4077*5113495bSYour Name 	tso_frag_paddr = dma_map_single(osdev->dev, tso_frag_vaddr,
4078*5113495bSYour Name 					nbytes, __qdf_dma_dir_to_os(dir));
4079*5113495bSYour Name 	if (unlikely(dma_mapping_error(osdev->dev, tso_frag_paddr))) {
4080*5113495bSYour Name 		qdf_err("DMA mapping error!");
4081*5113495bSYour Name 		qdf_assert_always(0);
4082*5113495bSYour Name 		return 0;
4083*5113495bSYour Name 	}
4084*5113495bSYour Name 	qdf_record_nbuf_nbytes(nbytes, dir, true);
4085*5113495bSYour Name 	return tso_frag_paddr;
4086*5113495bSYour Name }
4087*5113495bSYour Name 
/**
 * qdf_nbuf_tso_unmap_frag() - Unmap TSO segment
 * @osdev: qdf device handle
 * @tso_frag_paddr: DMA addr of tso fragment
 * @nbytes: number of bytes
 * @dir: direction
 *
 * Unmap TSO segment and for MCL record the amount of memory mapped
 *
 * Return: None
 */
static inline void qdf_nbuf_tso_unmap_frag(
	qdf_device_t osdev, qdf_dma_addr_t tso_frag_paddr,
	uint32_t nbytes, qdf_dma_dir_t dir)
{
	/* Deduct the bytes from the sysfs DMA-mapping stats, then unmap */
	qdf_record_nbuf_nbytes(nbytes, dir, false);
	dma_unmap_single(osdev->dev, tso_frag_paddr,
			 nbytes, __qdf_dma_dir_to_os(dir));
}
4107*5113495bSYour Name 
4108*5113495bSYour Name /**
4109*5113495bSYour Name  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
4110*5113495bSYour Name  * information
4111*5113495bSYour Name  * @osdev: qdf device handle
4112*5113495bSYour Name  * @skb: skb buffer
4113*5113495bSYour Name  * @tso_info: Parameters common to all segments
4114*5113495bSYour Name  *
4115*5113495bSYour Name  * Get the TSO information that is common across all the TCP
4116*5113495bSYour Name  * segments of the jumbo packet
4117*5113495bSYour Name  *
4118*5113495bSYour Name  * Return: 0 - success 1 - failure
4119*5113495bSYour Name  */
__qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,struct sk_buff * skb,struct qdf_tso_cmn_seg_info_t * tso_info)4120*5113495bSYour Name static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
4121*5113495bSYour Name 			struct sk_buff *skb,
4122*5113495bSYour Name 			struct qdf_tso_cmn_seg_info_t *tso_info)
4123*5113495bSYour Name {
4124*5113495bSYour Name 	/* Get ethernet type and ethernet header length */
4125*5113495bSYour Name 	tso_info->ethproto = vlan_get_protocol(skb);
4126*5113495bSYour Name 
4127*5113495bSYour Name 	/* Determine whether this is an IPv4 or IPv6 packet */
4128*5113495bSYour Name 	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
4129*5113495bSYour Name 		/* for IPv4, get the IP ID and enable TCP and IP csum */
4130*5113495bSYour Name 		struct iphdr *ipv4_hdr = ip_hdr(skb);
4131*5113495bSYour Name 
4132*5113495bSYour Name 		tso_info->ip_id = ntohs(ipv4_hdr->id);
4133*5113495bSYour Name 		tso_info->ipv4_csum_en = 1;
4134*5113495bSYour Name 		tso_info->tcp_ipv4_csum_en = 1;
4135*5113495bSYour Name 		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
4136*5113495bSYour Name 			qdf_err("TSO IPV4 proto 0x%x not TCP",
4137*5113495bSYour Name 				ipv4_hdr->protocol);
4138*5113495bSYour Name 			return 1;
4139*5113495bSYour Name 		}
4140*5113495bSYour Name 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
4141*5113495bSYour Name 		/* for IPv6, enable TCP csum. No IP ID or IP csum */
4142*5113495bSYour Name 		tso_info->tcp_ipv6_csum_en = 1;
4143*5113495bSYour Name 	} else {
4144*5113495bSYour Name 		qdf_err("TSO: ethertype 0x%x is not supported!",
4145*5113495bSYour Name 			tso_info->ethproto);
4146*5113495bSYour Name 		return 1;
4147*5113495bSYour Name 	}
4148*5113495bSYour Name 	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
4149*5113495bSYour Name 	tso_info->tcphdr = tcp_hdr(skb);
4150*5113495bSYour Name 	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
4151*5113495bSYour Name 	/* get pointer to the ethernet + IP + TCP header and their length */
4152*5113495bSYour Name 	tso_info->eit_hdr = skb->data;
4153*5113495bSYour Name 	tso_info->eit_hdr_len = (skb_transport_header(skb)
4154*5113495bSYour Name 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
4155*5113495bSYour Name 	tso_info->eit_hdr_dma_map_addr = qdf_nbuf_tso_map_frag(
4156*5113495bSYour Name 						osdev, tso_info->eit_hdr,
4157*5113495bSYour Name 						tso_info->eit_hdr_len,
4158*5113495bSYour Name 						QDF_DMA_TO_DEVICE);
4159*5113495bSYour Name 	if (qdf_unlikely(!tso_info->eit_hdr_dma_map_addr))
4160*5113495bSYour Name 		return 1;
4161*5113495bSYour Name 
4162*5113495bSYour Name 	if (tso_info->ethproto == htons(ETH_P_IP)) {
4163*5113495bSYour Name 		/* include IPv4 header length for IPV4 (total length) */
4164*5113495bSYour Name 		tso_info->ip_tcp_hdr_len =
4165*5113495bSYour Name 			tso_info->eit_hdr_len - tso_info->l2_len;
4166*5113495bSYour Name 	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
4167*5113495bSYour Name 		/* exclude IPv6 header length for IPv6 (payload length) */
4168*5113495bSYour Name 		tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
4169*5113495bSYour Name 	}
4170*5113495bSYour Name 	/*
4171*5113495bSYour Name 	 * The length of the payload (application layer data) is added to
4172*5113495bSYour Name 	 * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
4173*5113495bSYour Name 	 * descriptor.
4174*5113495bSYour Name 	 */
4175*5113495bSYour Name 
4176*5113495bSYour Name 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
4177*5113495bSYour Name 		tso_info->tcp_seq_num,
4178*5113495bSYour Name 		tso_info->eit_hdr_len,
4179*5113495bSYour Name 		tso_info->l2_len,
4180*5113495bSYour Name 		skb->len);
4181*5113495bSYour Name 	return 0;
4182*5113495bSYour Name }
4183*5113495bSYour Name 
4184*5113495bSYour Name 
4185*5113495bSYour Name /**
4186*5113495bSYour Name  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
4187*5113495bSYour Name  *
4188*5113495bSYour Name  * @curr_seg: Segment whose contents are initialized
4189*5113495bSYour Name  * @tso_cmn_info: Parameters common to all segments
4190*5113495bSYour Name  *
4191*5113495bSYour Name  * Return: None
4192*5113495bSYour Name  */
__qdf_nbuf_fill_tso_cmn_seg_info(struct qdf_tso_seg_elem_t * curr_seg,struct qdf_tso_cmn_seg_info_t * tso_cmn_info)4193*5113495bSYour Name static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
4194*5113495bSYour Name 				struct qdf_tso_seg_elem_t *curr_seg,
4195*5113495bSYour Name 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
4196*5113495bSYour Name {
4197*5113495bSYour Name 	/* Initialize the flags to 0 */
4198*5113495bSYour Name 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
4199*5113495bSYour Name 
4200*5113495bSYour Name 	/*
4201*5113495bSYour Name 	 * The following fields remain the same across all segments of
4202*5113495bSYour Name 	 * a jumbo packet
4203*5113495bSYour Name 	 */
4204*5113495bSYour Name 	curr_seg->seg.tso_flags.tso_enable = 1;
4205*5113495bSYour Name 	curr_seg->seg.tso_flags.ipv4_checksum_en =
4206*5113495bSYour Name 		tso_cmn_info->ipv4_csum_en;
4207*5113495bSYour Name 	curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
4208*5113495bSYour Name 		tso_cmn_info->tcp_ipv6_csum_en;
4209*5113495bSYour Name 	curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
4210*5113495bSYour Name 		tso_cmn_info->tcp_ipv4_csum_en;
4211*5113495bSYour Name 	curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
4212*5113495bSYour Name 
4213*5113495bSYour Name 	/* The following fields change for the segments */
4214*5113495bSYour Name 	curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
4215*5113495bSYour Name 	tso_cmn_info->ip_id++;
4216*5113495bSYour Name 
4217*5113495bSYour Name 	curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
4218*5113495bSYour Name 	curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
4219*5113495bSYour Name 	curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
4220*5113495bSYour Name 	curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
4221*5113495bSYour Name 	curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
4222*5113495bSYour Name 	curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
4223*5113495bSYour Name 
4224*5113495bSYour Name 	curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
4225*5113495bSYour Name 
4226*5113495bSYour Name 	/*
4227*5113495bSYour Name 	 * First fragment for each segment always contains the ethernet,
4228*5113495bSYour Name 	 * IP and TCP header
4229*5113495bSYour Name 	 */
4230*5113495bSYour Name 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
4231*5113495bSYour Name 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
4232*5113495bSYour Name 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
4233*5113495bSYour Name 	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
4234*5113495bSYour Name 
4235*5113495bSYour Name 	TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
4236*5113495bSYour Name 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
4237*5113495bSYour Name 		   tso_cmn_info->eit_hdr_len,
4238*5113495bSYour Name 		   curr_seg->seg.tso_flags.tcp_seq_num,
4239*5113495bSYour Name 		   curr_seg->seg.total_len);
4240*5113495bSYour Name 	qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
4241*5113495bSYour Name }
4242*5113495bSYour Name 
__qdf_nbuf_get_tso_info(qdf_device_t osdev,struct sk_buff * skb,struct qdf_tso_info_t * tso_info)4243*5113495bSYour Name uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
4244*5113495bSYour Name 		struct qdf_tso_info_t *tso_info)
4245*5113495bSYour Name {
4246*5113495bSYour Name 	/* common across all segments */
4247*5113495bSYour Name 	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
4248*5113495bSYour Name 	/* segment specific */
4249*5113495bSYour Name 	void *tso_frag_vaddr;
4250*5113495bSYour Name 	qdf_dma_addr_t tso_frag_paddr = 0;
4251*5113495bSYour Name 	uint32_t num_seg = 0;
4252*5113495bSYour Name 	struct qdf_tso_seg_elem_t *curr_seg;
4253*5113495bSYour Name 	struct qdf_tso_num_seg_elem_t *total_num_seg;
4254*5113495bSYour Name 	skb_frag_t *frag = NULL;
4255*5113495bSYour Name 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
4256*5113495bSYour Name 	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/
4257*5113495bSYour Name 	uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
4258*5113495bSYour Name 	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
4259*5113495bSYour Name 	int j = 0; /* skb fragment index */
4260*5113495bSYour Name 	uint8_t byte_8_align_offset;
4261*5113495bSYour Name 
4262*5113495bSYour Name 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
4263*5113495bSYour Name 	total_num_seg = tso_info->tso_num_seg_list;
4264*5113495bSYour Name 	curr_seg = tso_info->tso_seg_list;
4265*5113495bSYour Name 	total_num_seg->num_seg.tso_cmn_num_seg = 0;
4266*5113495bSYour Name 
4267*5113495bSYour Name 	byte_8_align_offset = qdf_nbuf_adj_tso_frag(skb);
4268*5113495bSYour Name 
4269*5113495bSYour Name 	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
4270*5113495bSYour Name 						skb, &tso_cmn_info))) {
4271*5113495bSYour Name 		qdf_warn("TSO: error getting common segment info");
4272*5113495bSYour Name 		return 0;
4273*5113495bSYour Name 	}
4274*5113495bSYour Name 
4275*5113495bSYour Name 	/* length of the first chunk of data in the skb */
4276*5113495bSYour Name 	skb_frag_len = skb_headlen(skb);
4277*5113495bSYour Name 
4278*5113495bSYour Name 	/* the 0th tso segment's 0th fragment always contains the EIT header */
4279*5113495bSYour Name 	/* update the remaining skb fragment length and TSO segment length */
4280*5113495bSYour Name 	skb_frag_len -= tso_cmn_info.eit_hdr_len;
4281*5113495bSYour Name 	skb_proc -= tso_cmn_info.eit_hdr_len;
4282*5113495bSYour Name 
4283*5113495bSYour Name 	/* get the address to the next tso fragment */
4284*5113495bSYour Name 	tso_frag_vaddr = skb->data +
4285*5113495bSYour Name 			 tso_cmn_info.eit_hdr_len +
4286*5113495bSYour Name 			 byte_8_align_offset;
4287*5113495bSYour Name 	/* get the length of the next tso fragment */
4288*5113495bSYour Name 	tso_frag_len = min(skb_frag_len, tso_seg_size);
4289*5113495bSYour Name 
4290*5113495bSYour Name 	if (tso_frag_len != 0) {
4291*5113495bSYour Name 		tso_frag_paddr = qdf_nbuf_tso_map_frag(
4292*5113495bSYour Name 					osdev, tso_frag_vaddr, tso_frag_len,
4293*5113495bSYour Name 					QDF_DMA_TO_DEVICE);
4294*5113495bSYour Name 		if (qdf_unlikely(!tso_frag_paddr))
4295*5113495bSYour Name 			return 0;
4296*5113495bSYour Name 	}
4297*5113495bSYour Name 
4298*5113495bSYour Name 	TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
4299*5113495bSYour Name 		__LINE__, skb_frag_len, tso_frag_len);
4300*5113495bSYour Name 	num_seg = tso_info->num_segs;
4301*5113495bSYour Name 	tso_info->num_segs = 0;
4302*5113495bSYour Name 	tso_info->is_tso = 1;
4303*5113495bSYour Name 
4304*5113495bSYour Name 	while (num_seg && curr_seg) {
4305*5113495bSYour Name 		int i = 1; /* tso fragment index */
4306*5113495bSYour Name 		uint8_t more_tso_frags = 1;
4307*5113495bSYour Name 
4308*5113495bSYour Name 		curr_seg->seg.num_frags = 0;
4309*5113495bSYour Name 		tso_info->num_segs++;
4310*5113495bSYour Name 		total_num_seg->num_seg.tso_cmn_num_seg++;
4311*5113495bSYour Name 
4312*5113495bSYour Name 		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
4313*5113495bSYour Name 						 &tso_cmn_info);
4314*5113495bSYour Name 
4315*5113495bSYour Name 		/* If TCP PSH flag is set, set it in the last or only segment */
4316*5113495bSYour Name 		if (num_seg == 1)
4317*5113495bSYour Name 			curr_seg->seg.tso_flags.psh = tso_cmn_info.tcphdr->psh;
4318*5113495bSYour Name 
4319*5113495bSYour Name 		if (unlikely(skb_proc == 0))
4320*5113495bSYour Name 			return tso_info->num_segs;
4321*5113495bSYour Name 
4322*5113495bSYour Name 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
4323*5113495bSYour Name 		curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
4324*5113495bSYour Name 		/* frag len is added to ip_len in while loop below*/
4325*5113495bSYour Name 
4326*5113495bSYour Name 		curr_seg->seg.num_frags++;
4327*5113495bSYour Name 
4328*5113495bSYour Name 		while (more_tso_frags) {
4329*5113495bSYour Name 			if (tso_frag_len != 0) {
4330*5113495bSYour Name 				curr_seg->seg.tso_frags[i].vaddr =
4331*5113495bSYour Name 					tso_frag_vaddr;
4332*5113495bSYour Name 				curr_seg->seg.tso_frags[i].length =
4333*5113495bSYour Name 					tso_frag_len;
4334*5113495bSYour Name 				curr_seg->seg.total_len += tso_frag_len;
4335*5113495bSYour Name 				curr_seg->seg.tso_flags.ip_len +=  tso_frag_len;
4336*5113495bSYour Name 				curr_seg->seg.num_frags++;
4337*5113495bSYour Name 				skb_proc = skb_proc - tso_frag_len;
4338*5113495bSYour Name 
4339*5113495bSYour Name 				/* increment the TCP sequence number */
4340*5113495bSYour Name 
4341*5113495bSYour Name 				tso_cmn_info.tcp_seq_num += tso_frag_len;
4342*5113495bSYour Name 				curr_seg->seg.tso_frags[i].paddr =
4343*5113495bSYour Name 					tso_frag_paddr;
4344*5113495bSYour Name 
4345*5113495bSYour Name 				qdf_assert_always(curr_seg->seg.tso_frags[i].paddr);
4346*5113495bSYour Name 			}
4347*5113495bSYour Name 
4348*5113495bSYour Name 			TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
4349*5113495bSYour Name 					__func__, __LINE__,
4350*5113495bSYour Name 					i,
4351*5113495bSYour Name 					tso_frag_len,
4352*5113495bSYour Name 					curr_seg->seg.total_len,
4353*5113495bSYour Name 					curr_seg->seg.tso_frags[i].vaddr);
4354*5113495bSYour Name 
4355*5113495bSYour Name 			/* if there is no more data left in the skb */
4356*5113495bSYour Name 			if (!skb_proc)
4357*5113495bSYour Name 				return tso_info->num_segs;
4358*5113495bSYour Name 
4359*5113495bSYour Name 			/* get the next payload fragment information */
4360*5113495bSYour Name 			/* check if there are more fragments in this segment */
4361*5113495bSYour Name 			if (tso_frag_len < tso_seg_size) {
4362*5113495bSYour Name 				more_tso_frags = 1;
4363*5113495bSYour Name 				if (tso_frag_len != 0) {
4364*5113495bSYour Name 					tso_seg_size = tso_seg_size -
4365*5113495bSYour Name 						tso_frag_len;
4366*5113495bSYour Name 					i++;
4367*5113495bSYour Name 					if (curr_seg->seg.num_frags ==
4368*5113495bSYour Name 								FRAG_NUM_MAX) {
4369*5113495bSYour Name 						more_tso_frags = 0;
4370*5113495bSYour Name 						/*
4371*5113495bSYour Name 						 * reset i and the tso
4372*5113495bSYour Name 						 * payload size
4373*5113495bSYour Name 						 */
4374*5113495bSYour Name 						i = 1;
4375*5113495bSYour Name 						tso_seg_size =
4376*5113495bSYour Name 							skb_shinfo(skb)->
4377*5113495bSYour Name 								gso_size;
4378*5113495bSYour Name 					}
4379*5113495bSYour Name 				}
4380*5113495bSYour Name 			} else {
4381*5113495bSYour Name 				more_tso_frags = 0;
4382*5113495bSYour Name 				/* reset i and the tso payload size */
4383*5113495bSYour Name 				i = 1;
4384*5113495bSYour Name 				tso_seg_size = skb_shinfo(skb)->gso_size;
4385*5113495bSYour Name 			}
4386*5113495bSYour Name 
4387*5113495bSYour Name 			/* if the next fragment is contiguous */
4388*5113495bSYour Name 			if ((tso_frag_len != 0)  && (tso_frag_len < skb_frag_len)) {
4389*5113495bSYour Name 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
4390*5113495bSYour Name 				skb_frag_len = skb_frag_len - tso_frag_len;
4391*5113495bSYour Name 				tso_frag_len = min(skb_frag_len, tso_seg_size);
4392*5113495bSYour Name 
4393*5113495bSYour Name 			} else { /* the next fragment is not contiguous */
4394*5113495bSYour Name 				if (skb_shinfo(skb)->nr_frags == 0) {
4395*5113495bSYour Name 					qdf_info("TSO: nr_frags == 0!");
4396*5113495bSYour Name 					qdf_assert(0);
4397*5113495bSYour Name 					return 0;
4398*5113495bSYour Name 				}
4399*5113495bSYour Name 				if (j >= skb_shinfo(skb)->nr_frags) {
4400*5113495bSYour Name 					qdf_info("TSO: nr_frags %d j %d",
4401*5113495bSYour Name 						 skb_shinfo(skb)->nr_frags, j);
4402*5113495bSYour Name 					qdf_assert(0);
4403*5113495bSYour Name 					return 0;
4404*5113495bSYour Name 				}
4405*5113495bSYour Name 				frag = &skb_shinfo(skb)->frags[j];
4406*5113495bSYour Name 				skb_frag_len = skb_frag_size(frag);
4407*5113495bSYour Name 				tso_frag_len = min(skb_frag_len, tso_seg_size);
4408*5113495bSYour Name 				tso_frag_vaddr = skb_frag_address_safe(frag);
4409*5113495bSYour Name 				j++;
4410*5113495bSYour Name 			}
4411*5113495bSYour Name 
4412*5113495bSYour Name 			TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
4413*5113495bSYour Name 				__func__, __LINE__, skb_frag_len, tso_frag_len,
4414*5113495bSYour Name 				tso_seg_size);
4415*5113495bSYour Name 
4416*5113495bSYour Name 			if (!(tso_frag_vaddr)) {
4417*5113495bSYour Name 				TSO_DEBUG("%s: Fragment virtual addr is NULL",
4418*5113495bSYour Name 						__func__);
4419*5113495bSYour Name 				return 0;
4420*5113495bSYour Name 			}
4421*5113495bSYour Name 
4422*5113495bSYour Name 			tso_frag_paddr = qdf_nbuf_tso_map_frag(
4423*5113495bSYour Name 						osdev, tso_frag_vaddr,
4424*5113495bSYour Name 						tso_frag_len,
4425*5113495bSYour Name 						QDF_DMA_TO_DEVICE);
4426*5113495bSYour Name 			if (qdf_unlikely(!tso_frag_paddr))
4427*5113495bSYour Name 				return 0;
4428*5113495bSYour Name 		}
4429*5113495bSYour Name 		TSO_DEBUG("%s tcp_seq_num: %u", __func__,
4430*5113495bSYour Name 				curr_seg->seg.tso_flags.tcp_seq_num);
4431*5113495bSYour Name 		num_seg--;
4432*5113495bSYour Name 		/* if TCP FIN flag was set, set it in the last segment */
4433*5113495bSYour Name 		if (!num_seg)
4434*5113495bSYour Name 			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
4435*5113495bSYour Name 
4436*5113495bSYour Name 		qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
4437*5113495bSYour Name 		curr_seg = curr_seg->next;
4438*5113495bSYour Name 	}
4439*5113495bSYour Name 	return tso_info->num_segs;
4440*5113495bSYour Name }
4441*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_get_tso_info);
4442*5113495bSYour Name 
/**
 * __qdf_nbuf_unmap_tso_segment() - DMA-unmap the fragments of one TSO segment
 * @osdev: qdf device handle used for the unmap operations
 * @tso_seg: TSO segment element whose fragments were previously DMA-mapped
 * @is_last_seg: true for the final segment of the packet; only then is
 *               fragment 0 (the header fragment shared logic) unmapped
 *
 * Fragments are unmapped in reverse order, highest index down to 1.
 * Fragment 0 is released only when @is_last_seg is set, either via the
 * normal fall-through after the loop or via the early-exit path taken
 * when the segment holds a single fragment (a state left behind when
 * qdf_nbuf_get_tso_info() failed part-way).
 */
void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  bool is_last_seg)
{
	uint32_t num_frags = 0;

	/* num_frags becomes the index of the highest mapped fragment */
	if (tso_seg->seg.num_frags > 0)
		num_frags = tso_seg->seg.num_frags - 1;

	/*Num of frags in a tso seg cannot be less than 2 */
	if (num_frags < 1) {
		/*
		 * If Num of frags is 1 in a tso seg but is_last_seg true,
		 * this may happen when qdf_nbuf_get_tso_info failed,
		 * do dma unmap for the 0th frag in this seg.
		 */
		if (is_last_seg && tso_seg->seg.num_frags == 1)
			goto last_seg_free_first_frag;

		qdf_assert(0);
		qdf_err("ERROR: num of frags in a tso segment is %d",
			(num_frags + 1));
		return;
	}

	while (num_frags) {
		/*Do dma unmap the tso seg except the 0th frag */
		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
			qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
				num_frags);
			qdf_assert(0);
			return;
		}
		qdf_nbuf_tso_unmap_frag(
			osdev,
			tso_seg->seg.tso_frags[num_frags].paddr,
			tso_seg->seg.tso_frags[num_frags].length,
			QDF_DMA_TO_DEVICE);
		/* clear paddr so a repeated unmap cannot double-free it */
		tso_seg->seg.tso_frags[num_frags].paddr = 0;
		num_frags--;
		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
	}

	/* deliberate fall-through: frag 0 is handled below for the last seg */
last_seg_free_first_frag:
	if (is_last_seg) {
		/*Do dma unmap for the tso seg 0th frag */
		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
			qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
			qdf_assert(0);
			return;
		}
		qdf_nbuf_tso_unmap_frag(osdev,
					tso_seg->seg.tso_frags[0].paddr,
					tso_seg->seg.tso_frags[0].length,
					QDF_DMA_TO_DEVICE);
		tso_seg->seg.tso_frags[0].paddr = 0;
		qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
	}
}
qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
4503*5113495bSYour Name 
__qdf_nbuf_get_tcp_payload_len(struct sk_buff * skb)4504*5113495bSYour Name size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
4505*5113495bSYour Name {
4506*5113495bSYour Name 	size_t packet_len;
4507*5113495bSYour Name 
4508*5113495bSYour Name 	packet_len = skb->len -
4509*5113495bSYour Name 		((skb_transport_header(skb) - skb_mac_header(skb)) +
4510*5113495bSYour Name 		 tcp_hdrlen(skb));
4511*5113495bSYour Name 
4512*5113495bSYour Name 	return packet_len;
4513*5113495bSYour Name }
4514*5113495bSYour Name 
4515*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_get_tcp_payload_len);
4516*5113495bSYour Name 
4517*5113495bSYour Name #ifndef BUILD_X86
/**
 * __qdf_nbuf_get_tso_num_seg() - count TSO segments needed for an skb
 * @skb: TSO skb whose payload spans the linear area and page fragments
 *
 * Accumulates the payload (headlen minus the EIT header, then each page
 * fragment) against gso_size, carrying the remainder across fragments,
 * and additionally closes a segment early whenever it would exceed
 * FRAG_NUM_MAX - 1 payload fragments (fragment 0 always holds the EIT
 * header).
 *
 * Return: number of TSO segments required, or 0 on inconsistent frag state.
 */
uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
	uint32_t remainder, num_segs = 0;
	uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
	uint8_t frags_per_tso = 0;
	uint32_t skb_frag_len = 0;
	uint32_t eit_hdr_len = (skb_transport_header(skb)
			 - skb_mac_header(skb)) + tcp_hdrlen(skb);
	skb_frag_t *frag = NULL;
	int j = 0;
	uint32_t temp_num_seg = 0;

	/* length of the first chunk of data in the skb minus eit header*/
	skb_frag_len = skb_headlen(skb) - eit_hdr_len;

	/* Calculate num of segs for skb's first chunk of data*/
	remainder = skb_frag_len % tso_seg_size;
	num_segs = skb_frag_len / tso_seg_size;
	/*
	 * Remainder non-zero and nr_frags zero implies end of skb data.
	 * In that case, one more tso seg is required to accommodate
	 * remaining data, hence num_segs++. If nr_frags is non-zero,
	 * then remaining data will be accommodated while doing the calculation
	 * for nr_frags data. Hence, frags_per_tso++.
	 */
	if (remainder) {
		if (!skb_nr_frags)
			num_segs++;
		else
			frags_per_tso++;
	}

	while (skb_nr_frags) {
		/* defensive: frag count must stay within shinfo's array */
		if (j >= skb_shinfo(skb)->nr_frags) {
			qdf_info("TSO: nr_frags %d j %d",
				 skb_shinfo(skb)->nr_frags, j);
			qdf_assert(0);
			return 0;
		}
		/*
		 * Calculate the number of tso seg for nr_frags data:
		 * Get the length of each frag in skb_frag_len, add to
		 * remainder.Get the number of segments by dividing it to
		 * tso_seg_size and calculate the new remainder.
		 * Decrement the nr_frags value and keep
		 * looping all the skb_fragments.
		 */
		frag = &skb_shinfo(skb)->frags[j];
		skb_frag_len = skb_frag_size(frag);
		temp_num_seg = num_segs;
		remainder += skb_frag_len;
		num_segs += remainder / tso_seg_size;
		remainder = remainder % tso_seg_size;
		skb_nr_frags--;
		if (remainder) {
			/* a new segment was opened: restart the frag count */
			if (num_segs > temp_num_seg)
				frags_per_tso = 0;
			/*
			 * increment the tso per frags whenever remainder is
			 * positive. If frags_per_tso reaches the (max-1),
			 * [First frags always have EIT header, therefore max-1]
			 * increment the num_segs as no more data can be
			 * accommodated in the curr tso seg. Reset the remainder
			 * and frags per tso and keep looping.
			 */
			frags_per_tso++;
			if (frags_per_tso == FRAG_NUM_MAX - 1) {
				num_segs++;
				frags_per_tso = 0;
				remainder = 0;
			}
			/*
			 * If this is the last skb frag and still remainder is
			 * non-zero(frags_per_tso is not reached to the max-1)
			 * then increment the num_segs to take care of the
			 * remaining length.
			 */
			if (!skb_nr_frags && remainder) {
				num_segs++;
				frags_per_tso = 0;
			}
		} else {
			 /* Whenever remainder is 0, reset the frags_per_tso. */
			frags_per_tso = 0;
		}
		j++;
	}

	return num_segs;
}
4609*5113495bSYour Name #elif !defined(QCA_WIFI_QCN9000)
__qdf_nbuf_get_tso_num_seg(struct sk_buff * skb)4610*5113495bSYour Name uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
4611*5113495bSYour Name {
4612*5113495bSYour Name 	uint32_t i, gso_size, tmp_len, num_segs = 0;
4613*5113495bSYour Name 	skb_frag_t *frag = NULL;
4614*5113495bSYour Name 
4615*5113495bSYour Name 	/*
4616*5113495bSYour Name 	 * Check if the head SKB or any of frags are allocated in < 0x50000000
4617*5113495bSYour Name 	 * region which cannot be accessed by Target
4618*5113495bSYour Name 	 */
4619*5113495bSYour Name 	if (virt_to_phys(skb->data) < 0x50000040) {
4620*5113495bSYour Name 		TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
4621*5113495bSYour Name 				__func__, __LINE__, skb_shinfo(skb)->nr_frags,
4622*5113495bSYour Name 				virt_to_phys(skb->data));
4623*5113495bSYour Name 		goto fail;
4624*5113495bSYour Name 
4625*5113495bSYour Name 	}
4626*5113495bSYour Name 
4627*5113495bSYour Name 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4628*5113495bSYour Name 		frag = &skb_shinfo(skb)->frags[i];
4629*5113495bSYour Name 
4630*5113495bSYour Name 		if (!frag)
4631*5113495bSYour Name 			goto fail;
4632*5113495bSYour Name 
4633*5113495bSYour Name 		if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
4634*5113495bSYour Name 			goto fail;
4635*5113495bSYour Name 	}
4636*5113495bSYour Name 
4637*5113495bSYour Name 
4638*5113495bSYour Name 	gso_size = skb_shinfo(skb)->gso_size;
4639*5113495bSYour Name 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
4640*5113495bSYour Name 			+ tcp_hdrlen(skb));
4641*5113495bSYour Name 	while (tmp_len) {
4642*5113495bSYour Name 		num_segs++;
4643*5113495bSYour Name 		if (tmp_len > gso_size)
4644*5113495bSYour Name 			tmp_len -= gso_size;
4645*5113495bSYour Name 		else
4646*5113495bSYour Name 			break;
4647*5113495bSYour Name 	}
4648*5113495bSYour Name 
4649*5113495bSYour Name 	return num_segs;
4650*5113495bSYour Name 
4651*5113495bSYour Name 	/*
4652*5113495bSYour Name 	 * Do not free this frame, just do socket level accounting
4653*5113495bSYour Name 	 * so that this is not reused.
4654*5113495bSYour Name 	 */
4655*5113495bSYour Name fail:
4656*5113495bSYour Name 	if (skb->sk)
4657*5113495bSYour Name 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
4658*5113495bSYour Name 
4659*5113495bSYour Name 	return 0;
4660*5113495bSYour Name }
4661*5113495bSYour Name #else
__qdf_nbuf_get_tso_num_seg(struct sk_buff * skb)4662*5113495bSYour Name uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
4663*5113495bSYour Name {
4664*5113495bSYour Name 	uint32_t i, gso_size, tmp_len, num_segs = 0;
4665*5113495bSYour Name 	skb_frag_t *frag = NULL;
4666*5113495bSYour Name 
4667*5113495bSYour Name 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4668*5113495bSYour Name 		frag = &skb_shinfo(skb)->frags[i];
4669*5113495bSYour Name 
4670*5113495bSYour Name 		if (!frag)
4671*5113495bSYour Name 			goto fail;
4672*5113495bSYour Name 	}
4673*5113495bSYour Name 
4674*5113495bSYour Name 	gso_size = skb_shinfo(skb)->gso_size;
4675*5113495bSYour Name 	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
4676*5113495bSYour Name 			+ tcp_hdrlen(skb));
4677*5113495bSYour Name 	while (tmp_len) {
4678*5113495bSYour Name 		num_segs++;
4679*5113495bSYour Name 		if (tmp_len > gso_size)
4680*5113495bSYour Name 			tmp_len -= gso_size;
4681*5113495bSYour Name 		else
4682*5113495bSYour Name 			break;
4683*5113495bSYour Name 	}
4684*5113495bSYour Name 
4685*5113495bSYour Name 	return num_segs;
4686*5113495bSYour Name 
4687*5113495bSYour Name 	/*
4688*5113495bSYour Name 	 * Do not free this frame, just do socket level accounting
4689*5113495bSYour Name 	 * so that this is not reused.
4690*5113495bSYour Name 	 */
4691*5113495bSYour Name fail:
4692*5113495bSYour Name 	if (skb->sk)
4693*5113495bSYour Name 		atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
4694*5113495bSYour Name 
4695*5113495bSYour Name 	return 0;
4696*5113495bSYour Name }
4697*5113495bSYour Name #endif
4698*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
4699*5113495bSYour Name 
4700*5113495bSYour Name #endif /* FEATURE_TSO */
4701*5113495bSYour Name 
/**
 * __qdf_dmaaddr_to_32s() - split a DMA address into 32-bit halves
 * @dmaaddr: DMA address to split
 * @lo: out - low 32 bits of @dmaaddr
 * @hi: out - high 32 bits of @dmaaddr (0 on 32-bit DMA-address builds)
 */
void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
			  uint32_t *lo, uint32_t *hi)
{
	/* sizeof check is a compile-time constant; the dead branch folds away */
	if (sizeof(dmaaddr) <= sizeof(uint32_t)) {
		*lo = dmaaddr;
		*hi = 0;
	} else {
		*lo = lower_32_bits(dmaaddr);
		*hi = upper_32_bits(dmaaddr);
	}
}

qdf_export_symbol(__qdf_dmaaddr_to_32s);
4715*5113495bSYour Name 
__qdf_nbuf_inc_users(struct sk_buff * skb)4716*5113495bSYour Name struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
4717*5113495bSYour Name {
4718*5113495bSYour Name 	qdf_nbuf_users_inc(&skb->users);
4719*5113495bSYour Name 	return skb;
4720*5113495bSYour Name }
4721*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_inc_users);
4722*5113495bSYour Name 
__qdf_nbuf_get_users(struct sk_buff * skb)4723*5113495bSYour Name int __qdf_nbuf_get_users(struct sk_buff *skb)
4724*5113495bSYour Name {
4725*5113495bSYour Name 	return qdf_nbuf_users_read(&skb->users);
4726*5113495bSYour Name }
4727*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_get_users);
4728*5113495bSYour Name 
/**
 * __qdf_nbuf_ref() - take a reference on an skb via the kernel helper
 * @skb: buffer to reference
 */
void __qdf_nbuf_ref(struct sk_buff *skb)
{
	skb_get(skb);
}
qdf_export_symbol(__qdf_nbuf_ref);
4734*5113495bSYour Name 
/**
 * __qdf_nbuf_shared() - check whether an skb has more than one user
 * @skb: buffer to query
 *
 * Return: non-zero if @skb is shared.
 */
int __qdf_nbuf_shared(struct sk_buff *skb)
{
	return skb_shared(skb);
}
qdf_export_symbol(__qdf_nbuf_shared);
4740*5113495bSYour Name 
4741*5113495bSYour Name QDF_STATUS
__qdf_nbuf_dmamap_create(qdf_device_t osdev,__qdf_dma_map_t * dmap)4742*5113495bSYour Name __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
4743*5113495bSYour Name {
4744*5113495bSYour Name 	QDF_STATUS error = QDF_STATUS_SUCCESS;
4745*5113495bSYour Name 	/*
4746*5113495bSYour Name 	 * driver can tell its SG capability, it must be handled.
4747*5113495bSYour Name 	 * Bounce buffers if they are there
4748*5113495bSYour Name 	 */
4749*5113495bSYour Name 	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
4750*5113495bSYour Name 	if (!(*dmap))
4751*5113495bSYour Name 		error = QDF_STATUS_E_NOMEM;
4752*5113495bSYour Name 
4753*5113495bSYour Name 	return error;
4754*5113495bSYour Name }
4755*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_dmamap_create);
4756*5113495bSYour Name 
4757*5113495bSYour Name void
__qdf_nbuf_dmamap_destroy(qdf_device_t osdev,__qdf_dma_map_t dmap)4758*5113495bSYour Name __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
4759*5113495bSYour Name {
4760*5113495bSYour Name 	kfree(dmap);
4761*5113495bSYour Name }
4762*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
4763*5113495bSYour Name 
4764*5113495bSYour Name #ifdef QDF_OS_DEBUG
4765*5113495bSYour Name QDF_STATUS
__qdf_nbuf_map_nbytes(qdf_device_t osdev,struct sk_buff * skb,qdf_dma_dir_t dir,int nbytes)4766*5113495bSYour Name __qdf_nbuf_map_nbytes(
4767*5113495bSYour Name 	qdf_device_t osdev,
4768*5113495bSYour Name 	struct sk_buff *skb,
4769*5113495bSYour Name 	qdf_dma_dir_t dir,
4770*5113495bSYour Name 	int nbytes)
4771*5113495bSYour Name {
4772*5113495bSYour Name 	struct skb_shared_info  *sh = skb_shinfo(skb);
4773*5113495bSYour Name 
4774*5113495bSYour Name 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4775*5113495bSYour Name 
4776*5113495bSYour Name 	/*
4777*5113495bSYour Name 	 * Assume there's only a single fragment.
4778*5113495bSYour Name 	 * To support multiple fragments, it would be necessary to change
4779*5113495bSYour Name 	 * adf_nbuf_t to be a separate object that stores meta-info
4780*5113495bSYour Name 	 * (including the bus address for each fragment) and a pointer
4781*5113495bSYour Name 	 * to the underlying sk_buff.
4782*5113495bSYour Name 	 */
4783*5113495bSYour Name 	qdf_assert(sh->nr_frags == 0);
4784*5113495bSYour Name 
4785*5113495bSYour Name 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
4786*5113495bSYour Name }
4787*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_map_nbytes);
4788*5113495bSYour Name #else
4789*5113495bSYour Name QDF_STATUS
__qdf_nbuf_map_nbytes(qdf_device_t osdev,struct sk_buff * skb,qdf_dma_dir_t dir,int nbytes)4790*5113495bSYour Name __qdf_nbuf_map_nbytes(
4791*5113495bSYour Name 	qdf_device_t osdev,
4792*5113495bSYour Name 	struct sk_buff *skb,
4793*5113495bSYour Name 	qdf_dma_dir_t dir,
4794*5113495bSYour Name 	int nbytes)
4795*5113495bSYour Name {
4796*5113495bSYour Name 	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
4797*5113495bSYour Name }
4798*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_map_nbytes);
4799*5113495bSYour Name #endif
4800*5113495bSYour Name void
__qdf_nbuf_unmap_nbytes(qdf_device_t osdev,struct sk_buff * skb,qdf_dma_dir_t dir,int nbytes)4801*5113495bSYour Name __qdf_nbuf_unmap_nbytes(
4802*5113495bSYour Name 	qdf_device_t osdev,
4803*5113495bSYour Name 	struct sk_buff *skb,
4804*5113495bSYour Name 	qdf_dma_dir_t dir,
4805*5113495bSYour Name 	int nbytes)
4806*5113495bSYour Name {
4807*5113495bSYour Name 	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4808*5113495bSYour Name 
4809*5113495bSYour Name 	/*
4810*5113495bSYour Name 	 * Assume there's a single fragment.
4811*5113495bSYour Name 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
4812*5113495bSYour Name 	 */
4813*5113495bSYour Name 	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
4814*5113495bSYour Name }
4815*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
4816*5113495bSYour Name 
4817*5113495bSYour Name void
__qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap,qdf_dmamap_info_t * sg)4818*5113495bSYour Name __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
4819*5113495bSYour Name {
4820*5113495bSYour Name 	qdf_assert(bmap->mapped);
4821*5113495bSYour Name 	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
4822*5113495bSYour Name 
4823*5113495bSYour Name 	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
4824*5113495bSYour Name 			sizeof(struct __qdf_segment));
4825*5113495bSYour Name 	sg->nsegs = bmap->nsegs;
4826*5113495bSYour Name }
4827*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_dma_map_info);
4828*5113495bSYour Name 
4829*5113495bSYour Name #if defined(__QDF_SUPPORT_FRAG_MEM)
4830*5113495bSYour Name void
__qdf_nbuf_frag_info(struct sk_buff * skb,qdf_sglist_t * sg)4831*5113495bSYour Name __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4832*5113495bSYour Name {
4833*5113495bSYour Name 	qdf_assert(skb);
4834*5113495bSYour Name 	sg->sg_segs[0].vaddr = skb->data;
4835*5113495bSYour Name 	sg->sg_segs[0].len   = skb->len;
4836*5113495bSYour Name 	sg->nsegs            = 1;
4837*5113495bSYour Name 
4838*5113495bSYour Name 	for (int i = 1; i <= sh->nr_frags; i++) {
4839*5113495bSYour Name 		skb_frag_t    *f        = &sh->frags[i - 1];
4840*5113495bSYour Name 
4841*5113495bSYour Name 		sg->sg_segs[i].vaddr    = (uint8_t *)(page_address(f->page) +
4842*5113495bSYour Name 			f->page_offset);
4843*5113495bSYour Name 		sg->sg_segs[i].len      = f->size;
4844*5113495bSYour Name 
4845*5113495bSYour Name 		qdf_assert(i < QDF_MAX_SGLIST);
4846*5113495bSYour Name 	}
4847*5113495bSYour Name 	sg->nsegs += i;
4848*5113495bSYour Name 
4849*5113495bSYour Name }
4850*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_frag_info);
4851*5113495bSYour Name #else
4852*5113495bSYour Name #ifdef QDF_OS_DEBUG
4853*5113495bSYour Name void
__qdf_nbuf_frag_info(struct sk_buff * skb,qdf_sglist_t * sg)4854*5113495bSYour Name __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4855*5113495bSYour Name {
4856*5113495bSYour Name 
4857*5113495bSYour Name 	struct skb_shared_info  *sh = skb_shinfo(skb);
4858*5113495bSYour Name 
4859*5113495bSYour Name 	qdf_assert(skb);
4860*5113495bSYour Name 	sg->sg_segs[0].vaddr = skb->data;
4861*5113495bSYour Name 	sg->sg_segs[0].len   = skb->len;
4862*5113495bSYour Name 	sg->nsegs            = 1;
4863*5113495bSYour Name 
4864*5113495bSYour Name 	qdf_assert(sh->nr_frags == 0);
4865*5113495bSYour Name }
4866*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_frag_info);
4867*5113495bSYour Name #else
4868*5113495bSYour Name void
__qdf_nbuf_frag_info(struct sk_buff * skb,qdf_sglist_t * sg)4869*5113495bSYour Name __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
4870*5113495bSYour Name {
4871*5113495bSYour Name 	sg->sg_segs[0].vaddr = skb->data;
4872*5113495bSYour Name 	sg->sg_segs[0].len   = skb->len;
4873*5113495bSYour Name 	sg->nsegs            = 1;
4874*5113495bSYour Name }
4875*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_frag_info);
4876*5113495bSYour Name #endif
4877*5113495bSYour Name #endif
4878*5113495bSYour Name uint32_t
__qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf,uint32_t cur_frag)4879*5113495bSYour Name __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
4880*5113495bSYour Name {
4881*5113495bSYour Name 	struct skb_shared_info  *sh = skb_shinfo(nbuf);
4882*5113495bSYour Name 	const skb_frag_t *frag = sh->frags + cur_frag;
4883*5113495bSYour Name 
4884*5113495bSYour Name 	return skb_frag_size(frag);
4885*5113495bSYour Name }
4886*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_get_frag_size);
4887*5113495bSYour Name 
4888*5113495bSYour Name #ifdef A_SIMOS_DEVHOST
__qdf_nbuf_frag_map(qdf_device_t osdev,__qdf_nbuf_t nbuf,int offset,qdf_dma_dir_t dir,int cur_frag)4889*5113495bSYour Name QDF_STATUS __qdf_nbuf_frag_map(
4890*5113495bSYour Name 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
4891*5113495bSYour Name 	int offset, qdf_dma_dir_t dir, int cur_frag)
4892*5113495bSYour Name {
4893*5113495bSYour Name 	int32_t paddr, frag_len;
4894*5113495bSYour Name 
4895*5113495bSYour Name 	QDF_NBUF_CB_PADDR(nbuf) = paddr = nbuf->data;
4896*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
4897*5113495bSYour Name }
4898*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_frag_map);
4899*5113495bSYour Name #else
__qdf_nbuf_frag_map(qdf_device_t osdev,__qdf_nbuf_t nbuf,int offset,qdf_dma_dir_t dir,int cur_frag)4900*5113495bSYour Name QDF_STATUS __qdf_nbuf_frag_map(
4901*5113495bSYour Name 	qdf_device_t osdev, __qdf_nbuf_t nbuf,
4902*5113495bSYour Name 	int offset, qdf_dma_dir_t dir, int cur_frag)
4903*5113495bSYour Name {
4904*5113495bSYour Name 	dma_addr_t paddr, frag_len;
4905*5113495bSYour Name 	struct skb_shared_info *sh = skb_shinfo(nbuf);
4906*5113495bSYour Name 	const skb_frag_t *frag = sh->frags + cur_frag;
4907*5113495bSYour Name 
4908*5113495bSYour Name 	frag_len = skb_frag_size(frag);
4909*5113495bSYour Name 
4910*5113495bSYour Name 	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
4911*5113495bSYour Name 		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
4912*5113495bSYour Name 					__qdf_dma_dir_to_os(dir));
4913*5113495bSYour Name 	return dma_mapping_error(osdev->dev, paddr) ?
4914*5113495bSYour Name 			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
4915*5113495bSYour Name }
4916*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_frag_map);
4917*5113495bSYour Name #endif
4918*5113495bSYour Name void
__qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap,void * cb,void * arg)4919*5113495bSYour Name __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
4920*5113495bSYour Name {
4921*5113495bSYour Name 	return;
4922*5113495bSYour Name }
4923*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
4924*5113495bSYour Name 
4925*5113495bSYour Name /**
4926*5113495bSYour Name  * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
4927*5113495bSYour Name  * @osdev: os device
4928*5113495bSYour Name  * @buf: sk buff
4929*5113495bSYour Name  * @dir: direction
4930*5113495bSYour Name  *
4931*5113495bSYour Name  * Return: none
4932*5113495bSYour Name  */
4933*5113495bSYour Name #if defined(A_SIMOS_DEVHOST)
/* Simulation host: there is no DMA hardware, so nothing to sync. */
static void __qdf_nbuf_sync_single_for_cpu(
	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
{
}
4939*5113495bSYour Name #else
/* Sync the nbuf's mapped region for CPU access; guards against an
 * nbuf that was never DMA-mapped (paddr of 0).
 */
static void __qdf_nbuf_sync_single_for_cpu(
	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
{
	if (!QDF_NBUF_CB_PADDR(buf)) {
		qdf_err("ERROR: NBUF mapped physical address is NULL");
		return;
	}
	/* sync from the start of data through the end of the buffer area */
	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
				skb_end_offset(buf) - skb_headroom(buf),
				__qdf_dma_dir_to_os(dir));
}
4951*5113495bSYour Name #endif
4952*5113495bSYour Name 
4953*5113495bSYour Name void
__qdf_nbuf_sync_for_cpu(qdf_device_t osdev,struct sk_buff * skb,qdf_dma_dir_t dir)4954*5113495bSYour Name __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
4955*5113495bSYour Name 	struct sk_buff *skb, qdf_dma_dir_t dir)
4956*5113495bSYour Name {
4957*5113495bSYour Name 	qdf_assert(
4958*5113495bSYour Name 	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
4959*5113495bSYour Name 
4960*5113495bSYour Name 	/*
4961*5113495bSYour Name 	 * Assume there's a single fragment.
4962*5113495bSYour Name 	 * If this is not true, the assertion in __adf_nbuf_map will catch it.
4963*5113495bSYour Name 	 */
4964*5113495bSYour Name 	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
4965*5113495bSYour Name }
4966*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
4967*5113495bSYour Name 
4968*5113495bSYour Name #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
4969*5113495bSYour Name /**
4970*5113495bSYour Name  * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
4971*5113495bSYour Name  * @rx_status: Pointer to rx_status.
4972*5113495bSYour Name  * @rtap_buf: Buf to which VHT info has to be updated.
4973*5113495bSYour Name  * @rtap_len: Current length of radiotap buffer
4974*5113495bSYour Name  *
4975*5113495bSYour Name  * Return: Length of radiotap after VHT flags updated.
4976*5113495bSYour Name  */
qdf_nbuf_update_radiotap_vht_flags(struct mon_rx_status * rx_status,int8_t * rtap_buf,uint32_t rtap_len)4977*5113495bSYour Name static unsigned int qdf_nbuf_update_radiotap_vht_flags(
4978*5113495bSYour Name 					struct mon_rx_status *rx_status,
4979*5113495bSYour Name 					int8_t *rtap_buf,
4980*5113495bSYour Name 					uint32_t rtap_len)
4981*5113495bSYour Name {
4982*5113495bSYour Name 	uint16_t vht_flags = 0;
4983*5113495bSYour Name 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
4984*5113495bSYour Name 
4985*5113495bSYour Name 	rtap_len = qdf_align(rtap_len, 2);
4986*5113495bSYour Name 
4987*5113495bSYour Name 	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
4988*5113495bSYour Name 	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
4989*5113495bSYour Name 		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
4990*5113495bSYour Name 		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
4991*5113495bSYour Name 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
4992*5113495bSYour Name 		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
4993*5113495bSYour Name 		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
4994*5113495bSYour Name 	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
4995*5113495bSYour Name 	rtap_len += 2;
4996*5113495bSYour Name 
4997*5113495bSYour Name 	rtap_buf[rtap_len] |=
4998*5113495bSYour Name 		(rx_status->is_stbc ?
4999*5113495bSYour Name 		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
5000*5113495bSYour Name 		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
5001*5113495bSYour Name 		(rx_status->ldpc ?
5002*5113495bSYour Name 		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
5003*5113495bSYour Name 		(rx_status->beamformed ?
5004*5113495bSYour Name 		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
5005*5113495bSYour Name 	rtap_len += 1;
5006*5113495bSYour Name 
5007*5113495bSYour Name 	if (!rx_user_status) {
5008*5113495bSYour Name 		switch (rx_status->vht_flag_values2) {
5009*5113495bSYour Name 		case IEEE80211_RADIOTAP_VHT_BW_20:
5010*5113495bSYour Name 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
5011*5113495bSYour Name 			break;
5012*5113495bSYour Name 		case IEEE80211_RADIOTAP_VHT_BW_40:
5013*5113495bSYour Name 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
5014*5113495bSYour Name 			break;
5015*5113495bSYour Name 		case IEEE80211_RADIOTAP_VHT_BW_80:
5016*5113495bSYour Name 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
5017*5113495bSYour Name 			break;
5018*5113495bSYour Name 		case IEEE80211_RADIOTAP_VHT_BW_160:
5019*5113495bSYour Name 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
5020*5113495bSYour Name 			break;
5021*5113495bSYour Name 		}
5022*5113495bSYour Name 		rtap_len += 1;
5023*5113495bSYour Name 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
5024*5113495bSYour Name 		rtap_len += 1;
5025*5113495bSYour Name 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
5026*5113495bSYour Name 		rtap_len += 1;
5027*5113495bSYour Name 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
5028*5113495bSYour Name 		rtap_len += 1;
5029*5113495bSYour Name 		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
5030*5113495bSYour Name 		rtap_len += 1;
5031*5113495bSYour Name 		rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
5032*5113495bSYour Name 		rtap_len += 1;
5033*5113495bSYour Name 		rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
5034*5113495bSYour Name 		rtap_len += 1;
5035*5113495bSYour Name 		put_unaligned_le16(rx_status->vht_flag_values6,
5036*5113495bSYour Name 				   &rtap_buf[rtap_len]);
5037*5113495bSYour Name 		rtap_len += 2;
5038*5113495bSYour Name 	} else {
5039*5113495bSYour Name 		switch (rx_user_status->vht_flag_values2) {
5040*5113495bSYour Name 		case IEEE80211_RADIOTAP_VHT_BW_20:
5041*5113495bSYour Name 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
5042*5113495bSYour Name 			break;
5043*5113495bSYour Name 		case IEEE80211_RADIOTAP_VHT_BW_40:
5044*5113495bSYour Name 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
5045*5113495bSYour Name 			break;
5046*5113495bSYour Name 		case IEEE80211_RADIOTAP_VHT_BW_80:
5047*5113495bSYour Name 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
5048*5113495bSYour Name 			break;
5049*5113495bSYour Name 		case IEEE80211_RADIOTAP_VHT_BW_160:
5050*5113495bSYour Name 			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
5051*5113495bSYour Name 			break;
5052*5113495bSYour Name 		}
5053*5113495bSYour Name 		rtap_len += 1;
5054*5113495bSYour Name 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[0]);
5055*5113495bSYour Name 		rtap_len += 1;
5056*5113495bSYour Name 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[1]);
5057*5113495bSYour Name 		rtap_len += 1;
5058*5113495bSYour Name 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[2]);
5059*5113495bSYour Name 		rtap_len += 1;
5060*5113495bSYour Name 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[3]);
5061*5113495bSYour Name 		rtap_len += 1;
5062*5113495bSYour Name 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values4);
5063*5113495bSYour Name 		rtap_len += 1;
5064*5113495bSYour Name 		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values5);
5065*5113495bSYour Name 		rtap_len += 1;
5066*5113495bSYour Name 		put_unaligned_le16(rx_user_status->vht_flag_values6,
5067*5113495bSYour Name 				   &rtap_buf[rtap_len]);
5068*5113495bSYour Name 		rtap_len += 2;
5069*5113495bSYour Name 	}
5070*5113495bSYour Name 
5071*5113495bSYour Name 	return rtap_len;
5072*5113495bSYour Name }
5073*5113495bSYour Name 
5074*5113495bSYour Name /**
5075*5113495bSYour Name  * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
5076*5113495bSYour Name  * @rx_status: Pointer to rx_status.
5077*5113495bSYour Name  * @rtap_buf: buffer to which radiotap has to be updated
5078*5113495bSYour Name  * @rtap_len: radiotap length
5079*5113495bSYour Name  *
5080*5113495bSYour Name  * API update high-efficiency (11ax) fields in the radiotap header
5081*5113495bSYour Name  *
5082*5113495bSYour Name  * Return: length of rtap_len updated.
5083*5113495bSYour Name  */
5084*5113495bSYour Name static unsigned int
qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status * rx_status,int8_t * rtap_buf,uint32_t rtap_len)5085*5113495bSYour Name qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
5086*5113495bSYour Name 				     int8_t *rtap_buf, uint32_t rtap_len)
5087*5113495bSYour Name {
5088*5113495bSYour Name 	/*
5089*5113495bSYour Name 	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
5090*5113495bSYour Name 	 * Enable all "known" HE radiotap flags for now
5091*5113495bSYour Name 	 */
5092*5113495bSYour Name 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5093*5113495bSYour Name 
5094*5113495bSYour Name 	rtap_len = qdf_align(rtap_len, 2);
5095*5113495bSYour Name 
5096*5113495bSYour Name 	if (!rx_user_status) {
5097*5113495bSYour Name 		put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
5098*5113495bSYour Name 		rtap_len += 2;
5099*5113495bSYour Name 
5100*5113495bSYour Name 		put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
5101*5113495bSYour Name 		rtap_len += 2;
5102*5113495bSYour Name 
5103*5113495bSYour Name 		put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
5104*5113495bSYour Name 		rtap_len += 2;
5105*5113495bSYour Name 
5106*5113495bSYour Name 		put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
5107*5113495bSYour Name 		rtap_len += 2;
5108*5113495bSYour Name 
5109*5113495bSYour Name 		put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
5110*5113495bSYour Name 		rtap_len += 2;
5111*5113495bSYour Name 
5112*5113495bSYour Name 		put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
5113*5113495bSYour Name 		rtap_len += 2;
5114*5113495bSYour Name 	} else {
5115*5113495bSYour Name 		put_unaligned_le16(rx_user_status->he_data1 |
5116*5113495bSYour Name 				   rx_status->he_data1, &rtap_buf[rtap_len]);
5117*5113495bSYour Name 		rtap_len += 2;
5118*5113495bSYour Name 
5119*5113495bSYour Name 		put_unaligned_le16(rx_user_status->he_data2 |
5120*5113495bSYour Name 				   rx_status->he_data2, &rtap_buf[rtap_len]);
5121*5113495bSYour Name 		rtap_len += 2;
5122*5113495bSYour Name 
5123*5113495bSYour Name 		put_unaligned_le16(rx_user_status->he_data3 |
5124*5113495bSYour Name 				   rx_status->he_data3, &rtap_buf[rtap_len]);
5125*5113495bSYour Name 		rtap_len += 2;
5126*5113495bSYour Name 
5127*5113495bSYour Name 		put_unaligned_le16(rx_user_status->he_data4 |
5128*5113495bSYour Name 				   rx_status->he_data4, &rtap_buf[rtap_len]);
5129*5113495bSYour Name 		rtap_len += 2;
5130*5113495bSYour Name 
5131*5113495bSYour Name 		put_unaligned_le16(rx_user_status->he_data5 |
5132*5113495bSYour Name 				   rx_status->he_data5, &rtap_buf[rtap_len]);
5133*5113495bSYour Name 		rtap_len += 2;
5134*5113495bSYour Name 
5135*5113495bSYour Name 		put_unaligned_le16(rx_user_status->he_data6 |
5136*5113495bSYour Name 				   rx_status->he_data6, &rtap_buf[rtap_len]);
5137*5113495bSYour Name 		rtap_len += 2;
5138*5113495bSYour Name 	}
5139*5113495bSYour Name 
5140*5113495bSYour Name 	return rtap_len;
5141*5113495bSYour Name }
5142*5113495bSYour Name 
5143*5113495bSYour Name 
5144*5113495bSYour Name /**
5145*5113495bSYour Name  * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
5146*5113495bSYour Name  * @rx_status: Pointer to rx_status.
5147*5113495bSYour Name  * @rtap_buf: buffer to which radiotap has to be updated
5148*5113495bSYour Name  * @rtap_len: radiotap length
5149*5113495bSYour Name  *
5150*5113495bSYour Name  * API update HE-MU fields in the radiotap header
5151*5113495bSYour Name  *
5152*5113495bSYour Name  * Return: length of rtap_len updated.
5153*5113495bSYour Name  */
5154*5113495bSYour Name static unsigned int
qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status * rx_status,int8_t * rtap_buf,uint32_t rtap_len)5155*5113495bSYour Name qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
5156*5113495bSYour Name 				     int8_t *rtap_buf, uint32_t rtap_len)
5157*5113495bSYour Name {
5158*5113495bSYour Name 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5159*5113495bSYour Name 
5160*5113495bSYour Name 	rtap_len = qdf_align(rtap_len, 2);
5161*5113495bSYour Name 
5162*5113495bSYour Name 	/*
5163*5113495bSYour Name 	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
5164*5113495bSYour Name 	 * Enable all "known" he-mu radiotap flags for now
5165*5113495bSYour Name 	 */
5166*5113495bSYour Name 
5167*5113495bSYour Name 	if (!rx_user_status) {
5168*5113495bSYour Name 		put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
5169*5113495bSYour Name 		rtap_len += 2;
5170*5113495bSYour Name 
5171*5113495bSYour Name 		put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
5172*5113495bSYour Name 		rtap_len += 2;
5173*5113495bSYour Name 
5174*5113495bSYour Name 		rtap_buf[rtap_len] = rx_status->he_RU[0];
5175*5113495bSYour Name 		rtap_len += 1;
5176*5113495bSYour Name 
5177*5113495bSYour Name 		rtap_buf[rtap_len] = rx_status->he_RU[1];
5178*5113495bSYour Name 		rtap_len += 1;
5179*5113495bSYour Name 
5180*5113495bSYour Name 		rtap_buf[rtap_len] = rx_status->he_RU[2];
5181*5113495bSYour Name 		rtap_len += 1;
5182*5113495bSYour Name 
5183*5113495bSYour Name 		rtap_buf[rtap_len] = rx_status->he_RU[3];
5184*5113495bSYour Name 		rtap_len += 1;
5185*5113495bSYour Name 	} else {
5186*5113495bSYour Name 		put_unaligned_le16(rx_user_status->he_flags1,
5187*5113495bSYour Name 				   &rtap_buf[rtap_len]);
5188*5113495bSYour Name 		rtap_len += 2;
5189*5113495bSYour Name 
5190*5113495bSYour Name 		put_unaligned_le16(rx_user_status->he_flags2,
5191*5113495bSYour Name 				   &rtap_buf[rtap_len]);
5192*5113495bSYour Name 		rtap_len += 2;
5193*5113495bSYour Name 
5194*5113495bSYour Name 		rtap_buf[rtap_len] = rx_user_status->he_RU[0];
5195*5113495bSYour Name 		rtap_len += 1;
5196*5113495bSYour Name 
5197*5113495bSYour Name 		rtap_buf[rtap_len] = rx_user_status->he_RU[1];
5198*5113495bSYour Name 		rtap_len += 1;
5199*5113495bSYour Name 
5200*5113495bSYour Name 		rtap_buf[rtap_len] = rx_user_status->he_RU[2];
5201*5113495bSYour Name 		rtap_len += 1;
5202*5113495bSYour Name 
5203*5113495bSYour Name 		rtap_buf[rtap_len] = rx_user_status->he_RU[3];
5204*5113495bSYour Name 		rtap_len += 1;
5205*5113495bSYour Name 		qdf_debug("he_flags %x %x he-RU %x %x %x %x",
5206*5113495bSYour Name 			  rx_user_status->he_flags1,
5207*5113495bSYour Name 			  rx_user_status->he_flags2, rx_user_status->he_RU[0],
5208*5113495bSYour Name 			  rx_user_status->he_RU[1], rx_user_status->he_RU[2],
5209*5113495bSYour Name 			  rx_user_status->he_RU[3]);
5210*5113495bSYour Name 	}
5211*5113495bSYour Name 
5212*5113495bSYour Name 	return rtap_len;
5213*5113495bSYour Name }
5214*5113495bSYour Name 
5215*5113495bSYour Name /**
5216*5113495bSYour Name  * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
5217*5113495bSYour Name  * @rx_status: Pointer to rx_status.
5218*5113495bSYour Name  * @rtap_buf: buffer to which radiotap has to be updated
5219*5113495bSYour Name  * @rtap_len: radiotap length
5220*5113495bSYour Name  *
5221*5113495bSYour Name  * API update he-mu-other fields in the radiotap header
5222*5113495bSYour Name  *
5223*5113495bSYour Name  * Return: length of rtap_len updated.
5224*5113495bSYour Name  */
5225*5113495bSYour Name static unsigned int
qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status * rx_status,int8_t * rtap_buf,uint32_t rtap_len)5226*5113495bSYour Name qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
5227*5113495bSYour Name 				     int8_t *rtap_buf, uint32_t rtap_len)
5228*5113495bSYour Name {
5229*5113495bSYour Name 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5230*5113495bSYour Name 
5231*5113495bSYour Name 	rtap_len = qdf_align(rtap_len, 2);
5232*5113495bSYour Name 
5233*5113495bSYour Name 	/*
5234*5113495bSYour Name 	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
5235*5113495bSYour Name 	 * Enable all "known" he-mu-other radiotap flags for now
5236*5113495bSYour Name 	 */
5237*5113495bSYour Name 	if (!rx_user_status) {
5238*5113495bSYour Name 		put_unaligned_le16(rx_status->he_per_user_1,
5239*5113495bSYour Name 				   &rtap_buf[rtap_len]);
5240*5113495bSYour Name 		rtap_len += 2;
5241*5113495bSYour Name 
5242*5113495bSYour Name 		put_unaligned_le16(rx_status->he_per_user_2,
5243*5113495bSYour Name 				   &rtap_buf[rtap_len]);
5244*5113495bSYour Name 		rtap_len += 2;
5245*5113495bSYour Name 
5246*5113495bSYour Name 		rtap_buf[rtap_len] = rx_status->he_per_user_position;
5247*5113495bSYour Name 		rtap_len += 1;
5248*5113495bSYour Name 
5249*5113495bSYour Name 		rtap_buf[rtap_len] = rx_status->he_per_user_known;
5250*5113495bSYour Name 		rtap_len += 1;
5251*5113495bSYour Name 	} else {
5252*5113495bSYour Name 		put_unaligned_le16(rx_user_status->he_per_user_1,
5253*5113495bSYour Name 				   &rtap_buf[rtap_len]);
5254*5113495bSYour Name 		rtap_len += 2;
5255*5113495bSYour Name 
5256*5113495bSYour Name 		put_unaligned_le16(rx_user_status->he_per_user_2,
5257*5113495bSYour Name 				   &rtap_buf[rtap_len]);
5258*5113495bSYour Name 		rtap_len += 2;
5259*5113495bSYour Name 
5260*5113495bSYour Name 		rtap_buf[rtap_len] = rx_user_status->he_per_user_position;
5261*5113495bSYour Name 		rtap_len += 1;
5262*5113495bSYour Name 
5263*5113495bSYour Name 		rtap_buf[rtap_len] = rx_user_status->he_per_user_known;
5264*5113495bSYour Name 		rtap_len += 1;
5265*5113495bSYour Name 	}
5266*5113495bSYour Name 
5267*5113495bSYour Name 	return rtap_len;
5268*5113495bSYour Name }
5269*5113495bSYour Name 
5270*5113495bSYour Name /**
5271*5113495bSYour Name  * qdf_nbuf_update_radiotap_usig_flags() - Update radiotap header with USIG data
5272*5113495bSYour Name  *						from rx_status
5273*5113495bSYour Name  * @rx_status: Pointer to rx_status.
5274*5113495bSYour Name  * @rtap_buf: buffer to which radiotap has to be updated
5275*5113495bSYour Name  * @rtap_len: radiotap length
5276*5113495bSYour Name  *
5277*5113495bSYour Name  * API update Extra High Throughput (11be) fields in the radiotap header
5278*5113495bSYour Name  *
5279*5113495bSYour Name  * Return: length of rtap_len updated.
5280*5113495bSYour Name  */
5281*5113495bSYour Name static unsigned int
qdf_nbuf_update_radiotap_usig_flags(struct mon_rx_status * rx_status,int8_t * rtap_buf,uint32_t rtap_len)5282*5113495bSYour Name qdf_nbuf_update_radiotap_usig_flags(struct mon_rx_status *rx_status,
5283*5113495bSYour Name 				    int8_t *rtap_buf, uint32_t rtap_len)
5284*5113495bSYour Name {
5285*5113495bSYour Name 	/*
5286*5113495bSYour Name 	 * IEEE80211_RADIOTAP_USIG:
5287*5113495bSYour Name 	 *		u32, u32, u32
5288*5113495bSYour Name 	 */
5289*5113495bSYour Name 	rtap_len = qdf_align(rtap_len, 4);
5290*5113495bSYour Name 
5291*5113495bSYour Name 	put_unaligned_le32(rx_status->usig_common, &rtap_buf[rtap_len]);
5292*5113495bSYour Name 	rtap_len += 4;
5293*5113495bSYour Name 
5294*5113495bSYour Name 	put_unaligned_le32(rx_status->usig_value, &rtap_buf[rtap_len]);
5295*5113495bSYour Name 	rtap_len += 4;
5296*5113495bSYour Name 
5297*5113495bSYour Name 	put_unaligned_le32(rx_status->usig_mask, &rtap_buf[rtap_len]);
5298*5113495bSYour Name 	rtap_len += 4;
5299*5113495bSYour Name 
5300*5113495bSYour Name 	qdf_rl_debug("U-SIG data %x %x %x",
5301*5113495bSYour Name 		     rx_status->usig_common, rx_status->usig_value,
5302*5113495bSYour Name 		     rx_status->usig_mask);
5303*5113495bSYour Name 
5304*5113495bSYour Name 	return rtap_len;
5305*5113495bSYour Name }
5306*5113495bSYour Name 
/**
 * qdf_nbuf_update_radiotap_eht_flags() - Update radiotap header with EHT data
 *					from rx_status
 * @rx_status: Pointer to rx_status.
 * @rtap_buf: buffer to which radiotap has to be updated
 * @rtap_len: radiotap length
 *
 * API update Extra High Throughput (11be) fields in the radiotap header
 *
 * Return: length of rtap_len updated.
 */
static unsigned int
qdf_nbuf_update_radiotap_eht_flags(struct mon_rx_status *rx_status,
				   int8_t *rtap_buf, uint32_t rtap_len)
{
	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
	/*
	 * IEEE80211_RADIOTAP_EHT:
	 *		u32, u32, u32, u32, u32, u32, u32, u16, [u32, u32, u32]
	 */
	rtap_len = qdf_align(rtap_len, 4);

	/*
	 * known and data[0..1]: when a per-user status is attached, OR its
	 * bits into the PPDU-level values; otherwise emit the PPDU-level
	 * values as-is.
	 */
	if (!rx_user_status) {
		put_unaligned_le32(rx_status->eht_known, &rtap_buf[rtap_len]);
		rtap_len += 4;

		put_unaligned_le32(rx_status->eht_data[0], &rtap_buf[rtap_len]);
		rtap_len += 4;

		put_unaligned_le32(rx_status->eht_data[1], &rtap_buf[rtap_len]);
		rtap_len += 4;
	} else {
		put_unaligned_le32(rx_status->eht_known |
				   rx_user_status->eht_known,
				   &rtap_buf[rtap_len]);
		rtap_len += 4;

		put_unaligned_le32(rx_status->eht_data[0] |
				   rx_user_status->eht_data[0],
				   &rtap_buf[rtap_len]);
		rtap_len += 4;

		put_unaligned_le32(rx_status->eht_data[1] |
				   rx_user_status->eht_data[1],
				   &rtap_buf[rtap_len]);
		rtap_len += 4;
	}

	/* data[2..5] always come from the PPDU-level status only */
	put_unaligned_le32(rx_status->eht_data[2], &rtap_buf[rtap_len]);
	rtap_len += 4;

	put_unaligned_le32(rx_status->eht_data[3], &rtap_buf[rtap_len]);
	rtap_len += 4;

	put_unaligned_le32(rx_status->eht_data[4], &rtap_buf[rtap_len]);
	rtap_len += 4;

	put_unaligned_le32(rx_status->eht_data[5], &rtap_buf[rtap_len]);
	rtap_len += 4;

	if (!rx_user_status) {
		qdf_rl_debug("EHT data %x %x %x %x %x %x %x",
			     rx_status->eht_known, rx_status->eht_data[0],
			     rx_status->eht_data[1], rx_status->eht_data[2],
			     rx_status->eht_data[3], rx_status->eht_data[4],
			     rx_status->eht_data[5]);
	} else {
		/*
		 * Per-user case appends one extra user_info word before
		 * logging (the optional trailing u32 in the layout above).
		 */
		put_unaligned_le32(rx_user_status->eht_user_info, &rtap_buf[rtap_len]);
		rtap_len += 4;

		qdf_rl_debug("EHT data %x %x %x %x %x %x %x",
			     rx_status->eht_known | rx_user_status->eht_known,
			     rx_status->eht_data[0] |
			     rx_user_status->eht_data[0],
			     rx_status->eht_data[1] |
			     rx_user_status->eht_data[1],
			     rx_status->eht_data[2], rx_status->eht_data[3],
			     rx_status->eht_data[4], rx_status->eht_data[5]);
	}

	return rtap_len;
}
5389*5113495bSYour Name 
/*
 * Sub-type identifiers used with the Atheros vendor namespace
 * (presumably consumed via struct qdf_radiotap_vendor_ns_ath — confirm
 * against the users of ATH_OUI below).
 */
#define IEEE80211_RADIOTAP_TX_STATUS 0
#define IEEE80211_RADIOTAP_RETRY_COUNT 1
#define IEEE80211_RADIOTAP_EXTENSION2 2
uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
5394*5113495bSYour Name 
5395*5113495bSYour Name /**
5396*5113495bSYour Name  * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
5397*5113495bSYour Name  * @rx_status: Pointer to rx_status.
5398*5113495bSYour Name  * @rtap_buf: Buf to which AMPDU info has to be updated.
5399*5113495bSYour Name  * @rtap_len: Current length of radiotap buffer
5400*5113495bSYour Name  *
5401*5113495bSYour Name  * Return: Length of radiotap after AMPDU flags updated.
5402*5113495bSYour Name  */
qdf_nbuf_update_radiotap_ampdu_flags(struct mon_rx_status * rx_status,uint8_t * rtap_buf,uint32_t rtap_len)5403*5113495bSYour Name static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
5404*5113495bSYour Name 					struct mon_rx_status *rx_status,
5405*5113495bSYour Name 					uint8_t *rtap_buf,
5406*5113495bSYour Name 					uint32_t rtap_len)
5407*5113495bSYour Name {
5408*5113495bSYour Name 	/*
5409*5113495bSYour Name 	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
5410*5113495bSYour Name 	 * First 32 bits of AMPDU represents the reference number
5411*5113495bSYour Name 	 */
5412*5113495bSYour Name 
5413*5113495bSYour Name 	uint32_t ampdu_reference_num = rx_status->ppdu_id;
5414*5113495bSYour Name 	uint16_t ampdu_flags = 0;
5415*5113495bSYour Name 	uint16_t ampdu_reserved_flags = 0;
5416*5113495bSYour Name 
5417*5113495bSYour Name 	rtap_len = qdf_align(rtap_len, 4);
5418*5113495bSYour Name 
5419*5113495bSYour Name 	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
5420*5113495bSYour Name 	rtap_len += 4;
5421*5113495bSYour Name 	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
5422*5113495bSYour Name 	rtap_len += 2;
5423*5113495bSYour Name 	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
5424*5113495bSYour Name 	rtap_len += 2;
5425*5113495bSYour Name 
5426*5113495bSYour Name 	return rtap_len;
5427*5113495bSYour Name }
5428*5113495bSYour Name 
#ifdef DP_MON_RSSI_IN_DBM
/* rssi_comb is consumed as-is (presumably already in dBm — per config name) */
#define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
(rx_status->rssi_comb)
#else
#ifdef QCA_RSSI_DB2DBM
/*
 * When the status reports dB-to-dBm conversion support, apply the
 * provided rssi_offset; otherwise fall back to normalizing against the
 * channel noise floor.
 */
#define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
(((rx_status)->rssi_dbm_conv_support) ? \
((rx_status)->rssi_comb + (rx_status)->rssi_offset) :\
((rx_status)->rssi_comb + (rx_status)->chan_noise_floor))
#else
/* Default: offset rssi_comb by the channel noise floor to get dBm */
#define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
(rx_status->rssi_comb + rx_status->chan_noise_floor)
#endif
#endif
5443*5113495bSYour Name 
5444*5113495bSYour Name /**
5445*5113495bSYour Name  * qdf_nbuf_update_radiotap_tx_flags() - Update radiotap header tx flags
5446*5113495bSYour Name  * @rx_status: Pointer to rx_status.
5447*5113495bSYour Name  * @rtap_buf: Buf to which tx info has to be updated.
5448*5113495bSYour Name  * @rtap_len: Current length of radiotap buffer
5449*5113495bSYour Name  *
5450*5113495bSYour Name  * Return: Length of radiotap after tx flags updated.
5451*5113495bSYour Name  */
qdf_nbuf_update_radiotap_tx_flags(struct mon_rx_status * rx_status,uint8_t * rtap_buf,uint32_t rtap_len)5452*5113495bSYour Name static unsigned int qdf_nbuf_update_radiotap_tx_flags(
5453*5113495bSYour Name 						struct mon_rx_status *rx_status,
5454*5113495bSYour Name 						uint8_t *rtap_buf,
5455*5113495bSYour Name 						uint32_t rtap_len)
5456*5113495bSYour Name {
5457*5113495bSYour Name 	/*
5458*5113495bSYour Name 	 * IEEE80211_RADIOTAP_TX_FLAGS u16
5459*5113495bSYour Name 	 */
5460*5113495bSYour Name 
5461*5113495bSYour Name 	uint16_t tx_flags = 0;
5462*5113495bSYour Name 
5463*5113495bSYour Name 	rtap_len = qdf_align(rtap_len, 2);
5464*5113495bSYour Name 
5465*5113495bSYour Name 	switch (rx_status->tx_status) {
5466*5113495bSYour Name 	case RADIOTAP_TX_STATUS_FAIL:
5467*5113495bSYour Name 		tx_flags |= IEEE80211_RADIOTAP_F_TX_FAIL;
5468*5113495bSYour Name 		break;
5469*5113495bSYour Name 	case RADIOTAP_TX_STATUS_NOACK:
5470*5113495bSYour Name 		tx_flags |= IEEE80211_RADIOTAP_F_TX_NOACK;
5471*5113495bSYour Name 		break;
5472*5113495bSYour Name 	}
5473*5113495bSYour Name 	put_unaligned_le16(tx_flags, &rtap_buf[rtap_len]);
5474*5113495bSYour Name 	rtap_len += 2;
5475*5113495bSYour Name 
5476*5113495bSYour Name 	return rtap_len;
5477*5113495bSYour Name }
5478*5113495bSYour Name 
qdf_nbuf_update_radiotap(struct mon_rx_status * rx_status,qdf_nbuf_t nbuf,uint32_t headroom_sz)5479*5113495bSYour Name unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
5480*5113495bSYour Name 				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
5481*5113495bSYour Name {
5482*5113495bSYour Name 	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
5483*5113495bSYour Name 	struct ieee80211_radiotap_header *rthdr =
5484*5113495bSYour Name 		(struct ieee80211_radiotap_header *)rtap_buf;
5485*5113495bSYour Name 	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
5486*5113495bSYour Name 	uint32_t rtap_len = rtap_hdr_len;
5487*5113495bSYour Name 	uint8_t length = rtap_len;
5488*5113495bSYour Name 	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
5489*5113495bSYour Name 	struct qdf_radiotap_ext2 *rtap_ext2;
5490*5113495bSYour Name 	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
5491*5113495bSYour Name 
5492*5113495bSYour Name 	/* per user info */
5493*5113495bSYour Name 	qdf_le32_t *it_present;
5494*5113495bSYour Name 	uint32_t it_present_val;
5495*5113495bSYour Name 	bool radiotap_ext1_hdr_present = false;
5496*5113495bSYour Name 
5497*5113495bSYour Name 	it_present = &rthdr->it_present;
5498*5113495bSYour Name 
5499*5113495bSYour Name 	/* Adding Extended Header space */
5500*5113495bSYour Name 	if (rx_status->add_rtap_ext || rx_status->add_rtap_ext2 ||
5501*5113495bSYour Name 	    rx_status->usig_flags || rx_status->eht_flags) {
5502*5113495bSYour Name 		rtap_hdr_len += RADIOTAP_HEADER_EXT_LEN;
5503*5113495bSYour Name 		rtap_len = rtap_hdr_len;
5504*5113495bSYour Name 		radiotap_ext1_hdr_present = true;
5505*5113495bSYour Name 	}
5506*5113495bSYour Name 
5507*5113495bSYour Name 	length = rtap_len;
5508*5113495bSYour Name 
5509*5113495bSYour Name 	/* IEEE80211_RADIOTAP_TSFT              __le64       microseconds*/
5510*5113495bSYour Name 	it_present_val = (1 << IEEE80211_RADIOTAP_TSFT);
5511*5113495bSYour Name 	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
5512*5113495bSYour Name 	rtap_len += 8;
5513*5113495bSYour Name 
5514*5113495bSYour Name 	/* IEEE80211_RADIOTAP_FLAGS u8 */
5515*5113495bSYour Name 	it_present_val |= (1 << IEEE80211_RADIOTAP_FLAGS);
5516*5113495bSYour Name 
5517*5113495bSYour Name 	if (rx_status->rs_fcs_err)
5518*5113495bSYour Name 		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
5519*5113495bSYour Name 
5520*5113495bSYour Name 	rtap_buf[rtap_len] = rx_status->rtap_flags;
5521*5113495bSYour Name 	rtap_len += 1;
5522*5113495bSYour Name 
5523*5113495bSYour Name 	/* IEEE80211_RADIOTAP_RATE  u8           500kb/s */
5524*5113495bSYour Name 	if (!rx_status->ht_flags && !rx_status->vht_flags &&
5525*5113495bSYour Name 	    !rx_status->he_flags && !rx_status->eht_flags) {
5526*5113495bSYour Name 		it_present_val |= (1 << IEEE80211_RADIOTAP_RATE);
5527*5113495bSYour Name 		rtap_buf[rtap_len] = rx_status->rate;
5528*5113495bSYour Name 	} else
5529*5113495bSYour Name 		rtap_buf[rtap_len] = 0;
5530*5113495bSYour Name 	rtap_len += 1;
5531*5113495bSYour Name 
5532*5113495bSYour Name 	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16   MHz, bitmap */
5533*5113495bSYour Name 	it_present_val |= (1 << IEEE80211_RADIOTAP_CHANNEL);
5534*5113495bSYour Name 	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
5535*5113495bSYour Name 	rtap_len += 2;
5536*5113495bSYour Name 	/* Channel flags. */
5537*5113495bSYour Name 	if (rx_status->chan_freq > CHANNEL_FREQ_5150)
5538*5113495bSYour Name 		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
5539*5113495bSYour Name 	else
5540*5113495bSYour Name 		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
5541*5113495bSYour Name 	if (rx_status->cck_flag)
5542*5113495bSYour Name 		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
5543*5113495bSYour Name 	if (rx_status->ofdm_flag)
5544*5113495bSYour Name 		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
5545*5113495bSYour Name 	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
5546*5113495bSYour Name 	rtap_len += 2;
5547*5113495bSYour Name 
5548*5113495bSYour Name 	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8  decibels from one milliwatt
5549*5113495bSYour Name 	 *					(dBm)
5550*5113495bSYour Name 	 */
5551*5113495bSYour Name 	it_present_val |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
5552*5113495bSYour Name 	/*
5553*5113495bSYour Name 	 * rssi_comb is int dB, need to convert it to dBm.
5554*5113495bSYour Name 	 * normalize value to noise floor of -96 dBm
5555*5113495bSYour Name 	 */
5556*5113495bSYour Name 	rtap_buf[rtap_len] = QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status);
5557*5113495bSYour Name 	rtap_len += 1;
5558*5113495bSYour Name 
5559*5113495bSYour Name 	/* RX signal noise floor */
5560*5113495bSYour Name 	it_present_val |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
5561*5113495bSYour Name 	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
5562*5113495bSYour Name 	rtap_len += 1;
5563*5113495bSYour Name 
5564*5113495bSYour Name 	/* IEEE80211_RADIOTAP_ANTENNA   u8      antenna index */
5565*5113495bSYour Name 	it_present_val |= (1 << IEEE80211_RADIOTAP_ANTENNA);
5566*5113495bSYour Name 	rtap_buf[rtap_len] = rx_status->nr_ant;
5567*5113495bSYour Name 	rtap_len += 1;
5568*5113495bSYour Name 
5569*5113495bSYour Name 	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
5570*5113495bSYour Name 		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
5571*5113495bSYour Name 		return 0;
5572*5113495bSYour Name 	}
5573*5113495bSYour Name 
5574*5113495bSYour Name 	/* update tx flags for pkt capture*/
5575*5113495bSYour Name 	if (rx_status->add_rtap_ext) {
5576*5113495bSYour Name 		it_present_val |=
5577*5113495bSYour Name 			cpu_to_le32(1 << IEEE80211_RADIOTAP_TX_FLAGS);
5578*5113495bSYour Name 		rtap_len = qdf_nbuf_update_radiotap_tx_flags(rx_status,
5579*5113495bSYour Name 							     rtap_buf,
5580*5113495bSYour Name 							     rtap_len);
5581*5113495bSYour Name 
5582*5113495bSYour Name 		if ((rtap_len - length) > RADIOTAP_TX_FLAGS_LEN) {
5583*5113495bSYour Name 			qdf_print("length is greater than RADIOTAP_TX_FLAGS_LEN");
5584*5113495bSYour Name 			return 0;
5585*5113495bSYour Name 		}
5586*5113495bSYour Name 	}
5587*5113495bSYour Name 
5588*5113495bSYour Name 	if (rx_status->ht_flags) {
5589*5113495bSYour Name 		length = rtap_len;
5590*5113495bSYour Name 		/* IEEE80211_RADIOTAP_VHT u8, u8, u8 */
5591*5113495bSYour Name 		it_present_val |= (1 << IEEE80211_RADIOTAP_MCS);
5592*5113495bSYour Name 		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
5593*5113495bSYour Name 					IEEE80211_RADIOTAP_MCS_HAVE_MCS |
5594*5113495bSYour Name 					IEEE80211_RADIOTAP_MCS_HAVE_GI;
5595*5113495bSYour Name 		rtap_len += 1;
5596*5113495bSYour Name 
5597*5113495bSYour Name 		if (rx_status->sgi)
5598*5113495bSYour Name 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
5599*5113495bSYour Name 		if (rx_status->bw)
5600*5113495bSYour Name 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
5601*5113495bSYour Name 		else
5602*5113495bSYour Name 			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
5603*5113495bSYour Name 		rtap_len += 1;
5604*5113495bSYour Name 
5605*5113495bSYour Name 		rtap_buf[rtap_len] = rx_status->ht_mcs;
5606*5113495bSYour Name 		rtap_len += 1;
5607*5113495bSYour Name 
5608*5113495bSYour Name 		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
5609*5113495bSYour Name 			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
5610*5113495bSYour Name 			return 0;
5611*5113495bSYour Name 		}
5612*5113495bSYour Name 	}
5613*5113495bSYour Name 
5614*5113495bSYour Name 	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
5615*5113495bSYour Name 		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
5616*5113495bSYour Name 		it_present_val |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
5617*5113495bSYour Name 		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
5618*5113495bSYour Name 								rtap_buf,
5619*5113495bSYour Name 								rtap_len);
5620*5113495bSYour Name 	}
5621*5113495bSYour Name 
5622*5113495bSYour Name 	if (rx_status->vht_flags) {
5623*5113495bSYour Name 		length = rtap_len;
5624*5113495bSYour Name 		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
5625*5113495bSYour Name 		it_present_val |= (1 << IEEE80211_RADIOTAP_VHT);
5626*5113495bSYour Name 		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
5627*5113495bSYour Name 								rtap_buf,
5628*5113495bSYour Name 								rtap_len);
5629*5113495bSYour Name 
5630*5113495bSYour Name 		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
5631*5113495bSYour Name 			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
5632*5113495bSYour Name 			return 0;
5633*5113495bSYour Name 		}
5634*5113495bSYour Name 	}
5635*5113495bSYour Name 
5636*5113495bSYour Name 	if (rx_status->he_flags) {
5637*5113495bSYour Name 		length = rtap_len;
5638*5113495bSYour Name 		/* IEEE80211_RADIOTAP_HE */
5639*5113495bSYour Name 		it_present_val |= (1 << IEEE80211_RADIOTAP_HE);
5640*5113495bSYour Name 		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
5641*5113495bSYour Name 								rtap_buf,
5642*5113495bSYour Name 								rtap_len);
5643*5113495bSYour Name 
5644*5113495bSYour Name 		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
5645*5113495bSYour Name 			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
5646*5113495bSYour Name 			return 0;
5647*5113495bSYour Name 		}
5648*5113495bSYour Name 	}
5649*5113495bSYour Name 
5650*5113495bSYour Name 	if (rx_status->he_mu_flags) {
5651*5113495bSYour Name 		length = rtap_len;
5652*5113495bSYour Name 		/* IEEE80211_RADIOTAP_HE-MU */
5653*5113495bSYour Name 		it_present_val |= (1 << IEEE80211_RADIOTAP_HE_MU);
5654*5113495bSYour Name 		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
5655*5113495bSYour Name 								rtap_buf,
5656*5113495bSYour Name 								rtap_len);
5657*5113495bSYour Name 
5658*5113495bSYour Name 		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
5659*5113495bSYour Name 			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
5660*5113495bSYour Name 			return 0;
5661*5113495bSYour Name 		}
5662*5113495bSYour Name 	}
5663*5113495bSYour Name 
5664*5113495bSYour Name 	if (rx_status->he_mu_other_flags) {
5665*5113495bSYour Name 		length = rtap_len;
5666*5113495bSYour Name 		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
5667*5113495bSYour Name 		it_present_val |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
5668*5113495bSYour Name 		rtap_len =
5669*5113495bSYour Name 			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
5670*5113495bSYour Name 								rtap_buf,
5671*5113495bSYour Name 								rtap_len);
5672*5113495bSYour Name 
5673*5113495bSYour Name 		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
5674*5113495bSYour Name 			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
5675*5113495bSYour Name 			return 0;
5676*5113495bSYour Name 		}
5677*5113495bSYour Name 	}
5678*5113495bSYour Name 
5679*5113495bSYour Name 	rtap_len = qdf_align(rtap_len, 2);
5680*5113495bSYour Name 	/*
5681*5113495bSYour Name 	 * Radiotap Vendor Namespace
5682*5113495bSYour Name 	 */
5683*5113495bSYour Name 	it_present_val |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
5684*5113495bSYour Name 	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
5685*5113495bSYour Name 					(rtap_buf + rtap_len);
5686*5113495bSYour Name 	/*
5687*5113495bSYour Name 	 * Copy Atheros OUI - 3 bytes (4th byte is 0)
5688*5113495bSYour Name 	 */
5689*5113495bSYour Name 	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
5690*5113495bSYour Name 	/*
5691*5113495bSYour Name 	 * Name space selector = 0
5692*5113495bSYour Name 	 * We only will have one namespace for now
5693*5113495bSYour Name 	 */
5694*5113495bSYour Name 	radiotap_vendor_ns_ath->hdr.selector = 0;
5695*5113495bSYour Name 	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
5696*5113495bSYour Name 					sizeof(*radiotap_vendor_ns_ath) -
5697*5113495bSYour Name 					sizeof(radiotap_vendor_ns_ath->hdr));
5698*5113495bSYour Name 	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
5699*5113495bSYour Name 	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
5700*5113495bSYour Name 	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
5701*5113495bSYour Name 	radiotap_vendor_ns_ath->ppdu_start_timestamp =
5702*5113495bSYour Name 				cpu_to_le32(rx_status->ppdu_timestamp);
5703*5113495bSYour Name 	rtap_len += sizeof(*radiotap_vendor_ns_ath);
5704*5113495bSYour Name 
5705*5113495bSYour Name 	/* Move to next it_present */
5706*5113495bSYour Name 	if (radiotap_ext1_hdr_present) {
5707*5113495bSYour Name 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT);
5708*5113495bSYour Name 		put_unaligned_le32(it_present_val, it_present);
5709*5113495bSYour Name 		it_present_val = 0;
5710*5113495bSYour Name 		it_present++;
5711*5113495bSYour Name 	}
5712*5113495bSYour Name 
5713*5113495bSYour Name 	/* Add Extension to Radiotap Header & corresponding data */
5714*5113495bSYour Name 	if (rx_status->add_rtap_ext) {
5715*5113495bSYour Name 		it_present_val |= (1 << IEEE80211_RADIOTAP_TX_STATUS);
5716*5113495bSYour Name 		it_present_val |= (1 << IEEE80211_RADIOTAP_RETRY_COUNT);
5717*5113495bSYour Name 
5718*5113495bSYour Name 		rtap_buf[rtap_len] = rx_status->tx_status;
5719*5113495bSYour Name 		rtap_len += 1;
5720*5113495bSYour Name 		rtap_buf[rtap_len] = rx_status->tx_retry_cnt;
5721*5113495bSYour Name 		rtap_len += 1;
5722*5113495bSYour Name 	}
5723*5113495bSYour Name 
5724*5113495bSYour Name 	/* Add Extension2 to Radiotap Header */
5725*5113495bSYour Name 	if (rx_status->add_rtap_ext2) {
5726*5113495bSYour Name 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXTENSION2);
5727*5113495bSYour Name 
5728*5113495bSYour Name 		rtap_ext2 = (struct qdf_radiotap_ext2 *)(rtap_buf + rtap_len);
5729*5113495bSYour Name 		rtap_ext2->ppdu_id = rx_status->ppdu_id;
5730*5113495bSYour Name 		rtap_ext2->prev_ppdu_id = rx_status->prev_ppdu_id;
5731*5113495bSYour Name 		if (!rx_user_status) {
5732*5113495bSYour Name 			rtap_ext2->tid = rx_status->tid;
5733*5113495bSYour Name 			rtap_ext2->start_seq = rx_status->start_seq;
5734*5113495bSYour Name 			qdf_mem_copy(rtap_ext2->ba_bitmap,
5735*5113495bSYour Name 				     rx_status->ba_bitmap,
5736*5113495bSYour Name 				     8 * (sizeof(uint32_t)));
5737*5113495bSYour Name 		} else {
5738*5113495bSYour Name 			uint8_t ba_bitmap_sz = rx_user_status->ba_bitmap_sz;
5739*5113495bSYour Name 
5740*5113495bSYour Name 			/* set default bitmap sz if not set */
5741*5113495bSYour Name 			ba_bitmap_sz = ba_bitmap_sz ? ba_bitmap_sz : 8;
5742*5113495bSYour Name 			rtap_ext2->tid = rx_user_status->tid;
5743*5113495bSYour Name 			rtap_ext2->start_seq = rx_user_status->start_seq;
5744*5113495bSYour Name 			qdf_mem_copy(rtap_ext2->ba_bitmap,
5745*5113495bSYour Name 				     rx_user_status->ba_bitmap,
5746*5113495bSYour Name 				     ba_bitmap_sz * (sizeof(uint32_t)));
5747*5113495bSYour Name 		}
5748*5113495bSYour Name 
5749*5113495bSYour Name 		rtap_len += sizeof(*rtap_ext2);
5750*5113495bSYour Name 	}
5751*5113495bSYour Name 
5752*5113495bSYour Name 	if (rx_status->usig_flags) {
5753*5113495bSYour Name 		length = rtap_len;
5754*5113495bSYour Name 		/* IEEE80211_RADIOTAP_USIG */
5755*5113495bSYour Name 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT1_USIG);
5756*5113495bSYour Name 		rtap_len = qdf_nbuf_update_radiotap_usig_flags(rx_status,
5757*5113495bSYour Name 							       rtap_buf,
5758*5113495bSYour Name 							       rtap_len);
5759*5113495bSYour Name 
5760*5113495bSYour Name 		if ((rtap_len - length) > RADIOTAP_EHT_FLAGS_LEN) {
5761*5113495bSYour Name 			qdf_print("length is greater than RADIOTAP_EHT_FLAGS_LEN");
5762*5113495bSYour Name 			return 0;
5763*5113495bSYour Name 		}
5764*5113495bSYour Name 	}
5765*5113495bSYour Name 
5766*5113495bSYour Name 	if (rx_status->eht_flags) {
5767*5113495bSYour Name 		length = rtap_len;
5768*5113495bSYour Name 		/* IEEE80211_RADIOTAP_EHT */
5769*5113495bSYour Name 		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT1_EHT);
5770*5113495bSYour Name 		rtap_len = qdf_nbuf_update_radiotap_eht_flags(rx_status,
5771*5113495bSYour Name 							      rtap_buf,
5772*5113495bSYour Name 							      rtap_len);
5773*5113495bSYour Name 
5774*5113495bSYour Name 		if ((rtap_len - length) > RADIOTAP_EHT_FLAGS_LEN) {
5775*5113495bSYour Name 			qdf_print("length is greater than RADIOTAP_EHT_FLAGS_LEN");
5776*5113495bSYour Name 			return 0;
5777*5113495bSYour Name 		}
5778*5113495bSYour Name 	}
5779*5113495bSYour Name 
5780*5113495bSYour Name 	put_unaligned_le32(it_present_val, it_present);
5781*5113495bSYour Name 	rthdr->it_len = cpu_to_le16(rtap_len);
5782*5113495bSYour Name 
5783*5113495bSYour Name 	if (headroom_sz < rtap_len) {
5784*5113495bSYour Name 		qdf_debug("DEBUG: Not enough space to update radiotap");
5785*5113495bSYour Name 		return 0;
5786*5113495bSYour Name 	}
5787*5113495bSYour Name 
5788*5113495bSYour Name 	qdf_nbuf_push_head(nbuf, rtap_len);
5789*5113495bSYour Name 	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
5790*5113495bSYour Name 	return rtap_len;
5791*5113495bSYour Name }
5792*5113495bSYour Name #else
/*
 * Stub used when struct ieee80211_radiotap_header is not available in this
 * build: logs an error and reports zero bytes of radiotap data written.
 */
static unsigned int qdf_nbuf_update_radiotap_vht_flags(
					struct mon_rx_status *rx_status,
					int8_t *rtap_buf,
					uint32_t rtap_len)
{
	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
	return 0;
}
5801*5113495bSYour Name 
/*
 * Stub used when struct ieee80211_radiotap_header is not available in this
 * build: logs an error and reports zero bytes of radiotap data written.
 */
unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
				      int8_t *rtap_buf, uint32_t rtap_len)
{
	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
	return 0;
}
5808*5113495bSYour Name 
/*
 * Stub used when struct ieee80211_radiotap_header is not available in this
 * build: logs an error and reports zero bytes of radiotap data written.
 */
static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
					struct mon_rx_status *rx_status,
					uint8_t *rtap_buf,
					uint32_t rtap_len)
{
	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
	return 0;
}
5817*5113495bSYour Name 
/*
 * Stub used when struct ieee80211_radiotap_header is not available in this
 * build: logs an error and reports a zero-length radiotap header, so no
 * header is pushed onto the nbuf.
 */
unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
{
	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
	return 0;
}
5824*5113495bSYour Name #endif
5825*5113495bSYour Name qdf_export_symbol(qdf_nbuf_update_radiotap);
5826*5113495bSYour Name 
/**
 * __qdf_nbuf_reg_free_cb() - register an nbuf free callback
 * @cb_func_ptr: callback to store in the file-scope nbuf_free_cb hook
 *
 * Only stores the pointer; presumably the hook is consulted on the nbuf
 * free path — confirm at the nbuf_free_cb call sites.
 */
void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
{
	nbuf_free_cb = cb_func_ptr;
}

qdf_export_symbol(__qdf_nbuf_reg_free_cb);
5833*5113495bSYour Name 
qdf_nbuf_classify_pkt(struct sk_buff * skb)5834*5113495bSYour Name void qdf_nbuf_classify_pkt(struct sk_buff *skb)
5835*5113495bSYour Name {
5836*5113495bSYour Name 	struct ethhdr *eh = (struct ethhdr *)skb->data;
5837*5113495bSYour Name 
5838*5113495bSYour Name 	/* check destination mac address is broadcast/multicast */
5839*5113495bSYour Name 	if (is_broadcast_ether_addr((uint8_t *)eh))
5840*5113495bSYour Name 		QDF_NBUF_CB_SET_BCAST(skb);
5841*5113495bSYour Name 	else if (is_multicast_ether_addr((uint8_t *)eh))
5842*5113495bSYour Name 		QDF_NBUF_CB_SET_MCAST(skb);
5843*5113495bSYour Name 
5844*5113495bSYour Name 	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
5845*5113495bSYour Name 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5846*5113495bSYour Name 			QDF_NBUF_CB_PACKET_TYPE_ARP;
5847*5113495bSYour Name 	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
5848*5113495bSYour Name 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5849*5113495bSYour Name 			QDF_NBUF_CB_PACKET_TYPE_DHCP;
5850*5113495bSYour Name 	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
5851*5113495bSYour Name 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5852*5113495bSYour Name 			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
5853*5113495bSYour Name 	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
5854*5113495bSYour Name 		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
5855*5113495bSYour Name 			QDF_NBUF_CB_PACKET_TYPE_WAPI;
5856*5113495bSYour Name }
5857*5113495bSYour Name qdf_export_symbol(qdf_nbuf_classify_pkt);
5858*5113495bSYour Name 
/**
 * __qdf_nbuf_init() - reset an nbuf for reuse
 * @nbuf: network buffer to reset
 *
 * Resets the user refcount to 1, moves the data pointer back to
 * head + NET_SKB_PAD (the standard skb headroom), and resets the tail
 * pointer to match the data pointer.
 */
void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
{
	qdf_nbuf_users_set(&nbuf->users, 1);
	nbuf->data = nbuf->head + NET_SKB_PAD;
	skb_reset_tail_pointer(nbuf);
}
qdf_export_symbol(__qdf_nbuf_init);
5866*5113495bSYour Name 
5867*5113495bSYour Name #ifdef WLAN_FEATURE_FASTPATH
/**
 * qdf_nbuf_init_fast() - fastpath variant of nbuf re-initialization
 * @nbuf: network buffer to reset
 *
 * Resets the user refcount to 1 and the tail pointer, but — unlike
 * __qdf_nbuf_init() — leaves the data pointer where it is.
 */
void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
{
	qdf_nbuf_users_set(&nbuf->users, 1);
	skb_reset_tail_pointer(nbuf);
}
qdf_export_symbol(qdf_nbuf_init_fast);
5874*5113495bSYour Name #endif /* WLAN_FEATURE_FASTPATH */
5875*5113495bSYour Name 
5876*5113495bSYour Name 
5877*5113495bSYour Name #ifdef QDF_NBUF_GLOBAL_COUNT
/**
 * __qdf_nbuf_mod_init() - module init for the global nbuf counter
 *
 * Caches the memory-debug configuration flag, initializes the global
 * nbuf_count atomic, and exposes it read-only via debugfs.
 */
void __qdf_nbuf_mod_init(void)
{
	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
	qdf_atomic_init(&nbuf_count);
	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
}
5884*5113495bSYour Name 
/**
 * __qdf_nbuf_mod_exit() - module exit hook for the global nbuf counter
 *
 * Intentionally empty. NOTE(review): the debugfs node created in
 * __qdf_nbuf_mod_init() is not removed here — confirm that teardown is
 * handled elsewhere (e.g. recursive debugfs cleanup).
 */
void __qdf_nbuf_mod_exit(void)
{
}
5888*5113495bSYour Name #endif
5889*5113495bSYour Name 
5890*5113495bSYour Name #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
__qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf,uint8_t idx,int offset)5891*5113495bSYour Name QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
5892*5113495bSYour Name 					    int offset)
5893*5113495bSYour Name {
5894*5113495bSYour Name 	unsigned int frag_offset;
5895*5113495bSYour Name 	skb_frag_t *frag;
5896*5113495bSYour Name 
5897*5113495bSYour Name 	if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
5898*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
5899*5113495bSYour Name 
5900*5113495bSYour Name 	frag = &skb_shinfo(nbuf)->frags[idx];
5901*5113495bSYour Name 	frag_offset = skb_frag_off(frag);
5902*5113495bSYour Name 
5903*5113495bSYour Name 	frag_offset += offset;
5904*5113495bSYour Name 	skb_frag_off_set(frag, frag_offset);
5905*5113495bSYour Name 
5906*5113495bSYour Name 	__qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
5907*5113495bSYour Name 
5908*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
5909*5113495bSYour Name }
5910*5113495bSYour Name 
5911*5113495bSYour Name #else
__qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf,uint8_t idx,int offset)5912*5113495bSYour Name QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
5913*5113495bSYour Name 					    int offset)
5914*5113495bSYour Name {
5915*5113495bSYour Name 	uint16_t frag_offset;
5916*5113495bSYour Name 	skb_frag_t *frag;
5917*5113495bSYour Name 
5918*5113495bSYour Name 	if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
5919*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
5920*5113495bSYour Name 
5921*5113495bSYour Name 	frag = &skb_shinfo(nbuf)->frags[idx];
5922*5113495bSYour Name 	frag_offset = frag->page_offset;
5923*5113495bSYour Name 
5924*5113495bSYour Name 	frag_offset += offset;
5925*5113495bSYour Name 	frag->page_offset = frag_offset;
5926*5113495bSYour Name 
5927*5113495bSYour Name 	__qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
5928*5113495bSYour Name 
5929*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
5930*5113495bSYour Name }
5931*5113495bSYour Name #endif
5932*5113495bSYour Name 
5933*5113495bSYour Name qdf_export_symbol(__qdf_nbuf_move_frag_page_offset);
5934*5113495bSYour Name 
/**
 * __qdf_nbuf_remove_frag() - drop one page fragment from an nbuf
 * @nbuf: buffer to remove the fragment from
 * @idx: index of the fragment to drop
 * @truesize: truesize contribution to subtract from the skb
 *
 * Releases the fragment's page reference and rolls back the skb's
 * len/data_len/truesize accounting.
 */
void __qdf_nbuf_remove_frag(__qdf_nbuf_t nbuf,
			    uint16_t idx,
			    uint16_t truesize)
{
	uint16_t size;
	struct page *pg;

	pg = skb_frag_page(&skb_shinfo(nbuf)->frags[idx]);
	if (qdf_unlikely(!pg))
		return;

	size = qdf_nbuf_get_frag_size_by_idx(nbuf, idx);

	/* Drop the page reference, then adjust the skb accounting. */
	put_page(pg);
	nbuf->len -= size;
	nbuf->data_len -= size;
	nbuf->truesize -= truesize;
	/*
	 * NOTE(review): only nr_frags is decremented; entries after idx in
	 * frags[] are not shifted down, so this appears to assume idx is
	 * the last fragment — confirm against callers.
	 */
	skb_shinfo(nbuf)->nr_frags--;
}

qdf_export_symbol(__qdf_nbuf_remove_frag);
5956*5113495bSYour Name 
/**
 * __qdf_nbuf_add_rx_frag() - append a page fragment to an nbuf
 * @buf: virtual address of the fragment data
 * @nbuf: buffer to append the fragment to
 * @offset: byte offset of the data within @buf
 * @frag_len: fragment length in bytes
 * @truesize: truesize contribution to add to the skb
 * @take_frag_ref: when true, take an extra page reference on the new frag
 *
 * Asserts (always) that the frags[] array has room before appending.
 */
void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
			    int offset, int frag_len,
			    unsigned int truesize, bool take_frag_ref)
{
	uint8_t frag_idx;
	struct page *pg;
	int page_off;

	/* New frag goes at the current end of the frags[] array. */
	frag_idx = __qdf_nbuf_get_nr_frags(nbuf);
	qdf_assert_always(frag_idx < QDF_NBUF_MAX_FRAGS);

	/* Translate the virtual address into page + offset-in-page. */
	pg = virt_to_head_page(buf);
	page_off = buf - page_address(pg);

	skb_add_rx_frag(nbuf, frag_idx, pg,
			(page_off + offset),
			frag_len, truesize);

	if (unlikely(take_frag_ref)) {
		qdf_frag_count_inc(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);
		skb_frag_ref(nbuf, frag_idx);
	}
}

qdf_export_symbol(__qdf_nbuf_add_rx_frag);
5982*5113495bSYour Name 
5983*5113495bSYour Name #ifdef NBUF_FRAG_MEMORY_DEBUG
5984*5113495bSYour Name 
qdf_nbuf_move_frag_page_offset_debug(qdf_nbuf_t nbuf,uint8_t idx,int offset,const char * func,uint32_t line)5985*5113495bSYour Name QDF_STATUS qdf_nbuf_move_frag_page_offset_debug(qdf_nbuf_t nbuf, uint8_t idx,
5986*5113495bSYour Name 						int offset, const char *func,
5987*5113495bSYour Name 						uint32_t line)
5988*5113495bSYour Name {
5989*5113495bSYour Name 	QDF_STATUS result;
5990*5113495bSYour Name 	qdf_frag_t p_fragp, n_fragp;
5991*5113495bSYour Name 
5992*5113495bSYour Name 	p_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
5993*5113495bSYour Name 	result = __qdf_nbuf_move_frag_page_offset(nbuf, idx, offset);
5994*5113495bSYour Name 
5995*5113495bSYour Name 	if (qdf_likely(is_initial_mem_debug_disabled))
5996*5113495bSYour Name 		return result;
5997*5113495bSYour Name 
5998*5113495bSYour Name 	n_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
5999*5113495bSYour Name 
6000*5113495bSYour Name 	/*
6001*5113495bSYour Name 	 * Update frag address in frag debug tracker
6002*5113495bSYour Name 	 * when frag offset is successfully changed in skb
6003*5113495bSYour Name 	 */
6004*5113495bSYour Name 	if (result == QDF_STATUS_SUCCESS)
6005*5113495bSYour Name 		qdf_frag_debug_update_addr(p_fragp, n_fragp, func, line);
6006*5113495bSYour Name 
6007*5113495bSYour Name 	return result;
6008*5113495bSYour Name }
6009*5113495bSYour Name 
6010*5113495bSYour Name qdf_export_symbol(qdf_nbuf_move_frag_page_offset_debug);
6011*5113495bSYour Name 
/**
 * qdf_nbuf_add_rx_frag_debug() - append a page fragment with debug tracking
 * @buf: virtual address of the fragment data
 * @nbuf: buffer to append the fragment to
 * @offset: byte offset of the data within @buf
 * @frag_len: fragment length in bytes
 * @truesize: truesize contribution to add to the skb
 * @take_frag_ref: when true, take an extra page reference on the new frag
 * @func: caller function name for the debug tracker
 * @line: caller line number for the debug tracker
 *
 * Wraps __qdf_nbuf_add_rx_frag() and, when frag memory debug is enabled,
 * keeps the frag debug tracker's address and refcount in sync.
 */
void qdf_nbuf_add_rx_frag_debug(qdf_frag_t buf, qdf_nbuf_t nbuf,
				int offset, int frag_len,
				unsigned int truesize, bool take_frag_ref,
				const char *func, uint32_t line)
{
	uint32_t nr_frags;
	qdf_frag_t tracked;

	__qdf_nbuf_add_rx_frag(buf, nbuf, offset,
			       frag_len, truesize, take_frag_ref);

	if (qdf_likely(is_initial_mem_debug_disabled))
		return;

	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
	qdf_assert_always(nr_frags <= QDF_NBUF_MAX_FRAGS);

	/* Address of the fragment that was just appended. */
	tracked = qdf_nbuf_get_frag_addr(nbuf, nr_frags - 1);

	/* Retarget the debug entry if skb_add_rx_frag relocated the data. */
	if (tracked != buf && !take_frag_ref)
		qdf_frag_debug_update_addr(buf, tracked, func, line);

	/* Account for the new reference in the debug tracker. */
	qdf_frag_debug_refcount_inc(tracked, func, line);
}

qdf_export_symbol(qdf_nbuf_add_rx_frag_debug);
6041*5113495bSYour Name 
/**
 * qdf_net_buf_debug_acquire_frag() - bump debug refcounts for all frags
 * @buf: buffer whose fragments are being referenced
 * @func: caller function name for the debug tracker
 * @line: caller line number for the debug tracker
 *
 * Increments the frag debug tracker's refcount for every fragment on
 * @buf and on every nbuf chained through its frag_list (ext_list).
 * No-op when frag memory debug is disabled or @buf is NULL.
 */
void qdf_net_buf_debug_acquire_frag(qdf_nbuf_t buf, const char *func,
				    uint32_t line)
{
	uint32_t nr_frags;
	uint32_t i;
	qdf_nbuf_t ext_list;
	qdf_frag_t frag;

	if (qdf_likely(is_initial_mem_debug_disabled))
		return;

	if (qdf_unlikely(!buf))
		return;

	/* Bump the debug refcount for each frag directly on this nbuf. */
	nr_frags = qdf_nbuf_get_nr_frags(buf);
	qdf_assert_always(nr_frags <= QDF_NBUF_MAX_FRAGS);

	for (i = 0; i < nr_frags; i++) {
		frag = qdf_nbuf_get_frag_addr(buf, i);
		if (qdf_likely(frag))
			qdf_frag_debug_refcount_inc(frag, func, line);
	}

	/* Do the same for frags on every nbuf chained via the frag_list. */
	for (ext_list = qdf_nbuf_get_ext_list(buf); ext_list;
	     ext_list = qdf_nbuf_queue_next(ext_list)) {
		nr_frags = qdf_nbuf_get_nr_frags(ext_list);
		qdf_assert_always(nr_frags <= QDF_NBUF_MAX_FRAGS);

		for (i = 0; i < nr_frags; i++) {
			frag = qdf_nbuf_get_frag_addr(ext_list, i);
			if (qdf_likely(frag))
				qdf_frag_debug_refcount_inc(frag, func, line);
		}
	}
}

qdf_export_symbol(qdf_net_buf_debug_acquire_frag);
6090*5113495bSYour Name 
/**
 * qdf_net_buf_debug_release_frag() - drop debug refcounts for all frags
 * @buf: buffer whose fragments are being released
 * @func: caller function name for the debug tracker
 * @line: caller line number for the debug tracker
 *
 * Decrements the frag debug tracker's refcount for every fragment on
 * @buf and on every nbuf chained through its frag_list, but only when
 * the respective nbuf is down to its last user — this avoids dropping
 * the debug refcount on every call except the final one when the nbuf
 * is shared. No-op when frag memory debug is disabled or @buf is NULL.
 */
void qdf_net_buf_debug_release_frag(qdf_nbuf_t buf, const char *func,
				    uint32_t line)
{
	uint32_t nr_frags;
	qdf_nbuf_t ext_list;
	uint32_t i;
	qdf_frag_t frag;

	if (qdf_likely(is_initial_mem_debug_disabled))
		return;

	if (qdf_unlikely(!buf))
		return;

	/* Only the last user of the nbuf releases the debug references. */
	if (qdf_nbuf_get_users(buf) > 1)
		return;

	/* Drop the debug refcount for each frag directly on this nbuf. */
	nr_frags = qdf_nbuf_get_nr_frags(buf);
	qdf_assert_always(nr_frags <= QDF_NBUF_MAX_FRAGS);

	for (i = 0; i < nr_frags; i++) {
		frag = qdf_nbuf_get_frag_addr(buf, i);
		if (qdf_likely(frag))
			qdf_frag_debug_refcount_dec(frag, func, line);
	}

	/* Do the same for frags on every nbuf chained via the frag_list. */
	for (ext_list = qdf_nbuf_get_ext_list(buf); ext_list;
	     ext_list = qdf_nbuf_queue_next(ext_list)) {
		if (qdf_nbuf_get_users(ext_list) != 1)
			continue;

		nr_frags = qdf_nbuf_get_nr_frags(ext_list);
		qdf_assert_always(nr_frags <= QDF_NBUF_MAX_FRAGS);

		for (i = 0; i < nr_frags; i++) {
			frag = qdf_nbuf_get_frag_addr(ext_list, i);
			if (qdf_likely(frag))
				qdf_frag_debug_refcount_dec(frag, func, line);
		}
	}
}

qdf_export_symbol(qdf_net_buf_debug_release_frag);
6146*5113495bSYour Name 
6147*5113495bSYour Name QDF_STATUS
qdf_nbuf_remove_frag_debug(qdf_nbuf_t nbuf,uint16_t idx,uint16_t truesize,const char * func,uint32_t line)6148*5113495bSYour Name qdf_nbuf_remove_frag_debug(qdf_nbuf_t nbuf,
6149*5113495bSYour Name 			   uint16_t idx,
6150*5113495bSYour Name 			   uint16_t truesize,
6151*5113495bSYour Name 			   const char *func,
6152*5113495bSYour Name 			   uint32_t line)
6153*5113495bSYour Name {
6154*5113495bSYour Name 	uint16_t num_frags;
6155*5113495bSYour Name 	qdf_frag_t frag;
6156*5113495bSYour Name 
6157*5113495bSYour Name 	if (qdf_unlikely(!nbuf))
6158*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
6159*5113495bSYour Name 
6160*5113495bSYour Name 	num_frags = qdf_nbuf_get_nr_frags(nbuf);
6161*5113495bSYour Name 	if (idx >= num_frags)
6162*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
6163*5113495bSYour Name 
6164*5113495bSYour Name 	if (qdf_likely(is_initial_mem_debug_disabled)) {
6165*5113495bSYour Name 		__qdf_nbuf_remove_frag(nbuf, idx, truesize);
6166*5113495bSYour Name 		return QDF_STATUS_SUCCESS;
6167*5113495bSYour Name 	}
6168*5113495bSYour Name 
6169*5113495bSYour Name 	frag = qdf_nbuf_get_frag_addr(nbuf, idx);
6170*5113495bSYour Name 	if (qdf_likely(frag))
6171*5113495bSYour Name 		qdf_frag_debug_refcount_dec(frag, func, line);
6172*5113495bSYour Name 
6173*5113495bSYour Name 	__qdf_nbuf_remove_frag(nbuf, idx, truesize);
6174*5113495bSYour Name 
6175*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
6176*5113495bSYour Name }
6177*5113495bSYour Name 
6178*5113495bSYour Name qdf_export_symbol(qdf_nbuf_remove_frag_debug);
6179*5113495bSYour Name 
6180*5113495bSYour Name #endif /* NBUF_FRAG_MEMORY_DEBUG */
6181*5113495bSYour Name 
qdf_nbuf_t qdf_get_nbuf_valid_frag(qdf_nbuf_t nbuf)
{
	qdf_nbuf_t tail_nbuf;

	if (qdf_unlikely(!nbuf))
		return NULL;

	/* Head nbuf still has a free frag slot: use it directly */
	if (qdf_nbuf_get_nr_frags(nbuf) < QDF_NBUF_MAX_FRAGS)
		return nbuf;

	/* Head is full; the only other candidate is the frag-list tail */
	if (!__qdf_nbuf_has_fraglist(nbuf))
		return NULL;

	tail_nbuf = __qdf_nbuf_get_last_frag_list_nbuf(nbuf);
	if (qdf_unlikely(!tail_nbuf))
		return NULL;

	/* Tail qualifies only if it too has room for another frag */
	return (qdf_nbuf_get_nr_frags(tail_nbuf) < QDF_NBUF_MAX_FRAGS) ?
		tail_nbuf : NULL;
}
6209*5113495bSYour Name 
6210*5113495bSYour Name qdf_export_symbol(qdf_get_nbuf_valid_frag);
6211*5113495bSYour Name 
6212*5113495bSYour Name QDF_STATUS
qdf_nbuf_add_frag_debug(qdf_device_t osdev,qdf_frag_t buf,qdf_nbuf_t nbuf,int offset,int frag_len,unsigned int truesize,bool take_frag_ref,unsigned int minsize,const char * func,uint32_t line)6213*5113495bSYour Name qdf_nbuf_add_frag_debug(qdf_device_t osdev, qdf_frag_t buf,
6214*5113495bSYour Name 			qdf_nbuf_t nbuf, int offset,
6215*5113495bSYour Name 			int frag_len, unsigned int truesize,
6216*5113495bSYour Name 			bool take_frag_ref, unsigned int minsize,
6217*5113495bSYour Name 			const char *func, uint32_t line)
6218*5113495bSYour Name {
6219*5113495bSYour Name 	qdf_nbuf_t cur_nbuf;
6220*5113495bSYour Name 	qdf_nbuf_t this_nbuf;
6221*5113495bSYour Name 
6222*5113495bSYour Name 	cur_nbuf = nbuf;
6223*5113495bSYour Name 	this_nbuf = nbuf;
6224*5113495bSYour Name 
6225*5113495bSYour Name 	if (qdf_unlikely(!frag_len || !buf)) {
6226*5113495bSYour Name 		qdf_nofl_err("%s : %d frag[ buf[%pK] len[%d]] not valid\n",
6227*5113495bSYour Name 			     func, line,
6228*5113495bSYour Name 			     buf, frag_len);
6229*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
6230*5113495bSYour Name 	}
6231*5113495bSYour Name 
6232*5113495bSYour Name 	this_nbuf = qdf_get_nbuf_valid_frag(this_nbuf);
6233*5113495bSYour Name 
6234*5113495bSYour Name 	if (this_nbuf) {
6235*5113495bSYour Name 		cur_nbuf = this_nbuf;
6236*5113495bSYour Name 	} else {
6237*5113495bSYour Name 		/* allocate a dummy mpdu buffer of 64 bytes headroom */
6238*5113495bSYour Name 		this_nbuf = qdf_nbuf_alloc(osdev, minsize, minsize, 4, false);
6239*5113495bSYour Name 		if (qdf_unlikely(!this_nbuf)) {
6240*5113495bSYour Name 			qdf_nofl_err("%s : %d no memory to allocate\n",
6241*5113495bSYour Name 				     func, line);
6242*5113495bSYour Name 			return QDF_STATUS_E_NOMEM;
6243*5113495bSYour Name 		}
6244*5113495bSYour Name 	}
6245*5113495bSYour Name 
6246*5113495bSYour Name 	qdf_nbuf_add_rx_frag(buf, this_nbuf, offset, frag_len, truesize,
6247*5113495bSYour Name 			     take_frag_ref);
6248*5113495bSYour Name 
6249*5113495bSYour Name 	if (this_nbuf != cur_nbuf) {
6250*5113495bSYour Name 		/* add new skb to frag list */
6251*5113495bSYour Name 		qdf_nbuf_append_ext_list(nbuf, this_nbuf,
6252*5113495bSYour Name 					 qdf_nbuf_len(this_nbuf));
6253*5113495bSYour Name 	}
6254*5113495bSYour Name 
6255*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
6256*5113495bSYour Name }
6257*5113495bSYour Name 
6258*5113495bSYour Name qdf_export_symbol(qdf_nbuf_add_frag_debug);
6259*5113495bSYour Name 
6260*5113495bSYour Name #ifdef MEMORY_DEBUG
/**
 * qdf_nbuf_acquire_track_lock() - take the nbuf-track hash-bucket
 * spinlock for @index with local IRQs disabled
 * @index: bucket index into g_qdf_net_buf_track_lock[]
 * @irq_flag: IRQ-state cookie for spin_lock_irqsave()
 *
 * NOTE(review): @irq_flag is passed by value, so the IRQ flags saved by
 * spin_lock_irqsave() land in this function's local copy and never reach
 * the caller — confirm callers pair this with
 * qdf_nbuf_release_track_lock() in a way that tolerates this.
 */
void qdf_nbuf_acquire_track_lock(uint32_t index,
				 unsigned long irq_flag)
{
	spin_lock_irqsave(&g_qdf_net_buf_track_lock[index],
			  irq_flag);
}
6267*5113495bSYour Name 
/**
 * qdf_nbuf_release_track_lock() - drop the nbuf-track hash-bucket
 * spinlock for @index and restore the IRQ state in @irq_flag
 * @index: bucket index into g_qdf_net_buf_track_lock[]
 * @irq_flag: IRQ-state cookie to restore via spin_unlock_irqrestore()
 *
 * NOTE(review): the restored flags come from the caller-supplied value,
 * not from the value saved inside qdf_nbuf_acquire_track_lock() (which
 * takes its flag argument by value) — verify callers supply the correct
 * IRQ state.
 */
void qdf_nbuf_release_track_lock(uint32_t index,
				 unsigned long irq_flag)
{
	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[index],
			       irq_flag);
}
6274*5113495bSYour Name 
/* Return the head of the nbuf debug-tracking list for hash bucket @index */
QDF_NBUF_TRACK *qdf_nbuf_get_track_tbl(uint32_t index)
{
	return gp_qdf_net_buf_track_tbl[index];
}
6279*5113495bSYour Name #endif /* MEMORY_DEBUG */
6280*5113495bSYour Name 
6281*5113495bSYour Name #ifdef ENHANCED_OS_ABSTRACTION
/* Thin OS-abstraction wrapper: stamp @buf via __qdf_nbuf_set_timestamp() */
void qdf_nbuf_set_timestamp(qdf_nbuf_t buf)
{
	__qdf_nbuf_set_timestamp(buf);
}
6286*5113495bSYour Name 
6287*5113495bSYour Name qdf_export_symbol(qdf_nbuf_set_timestamp);
6288*5113495bSYour Name 
/* Thin OS-abstraction wrapper: return @buf's stored timestamp */
uint64_t qdf_nbuf_get_timestamp(qdf_nbuf_t buf)
{
	return __qdf_nbuf_get_timestamp(buf);
}
6293*5113495bSYour Name 
6294*5113495bSYour Name qdf_export_symbol(qdf_nbuf_get_timestamp);
6295*5113495bSYour Name 
/* Thin OS-abstraction wrapper: @buf's timestamp in microseconds */
uint64_t qdf_nbuf_get_timestamp_us(qdf_nbuf_t buf)
{
	return __qdf_nbuf_get_timestamp_us(buf);
}
6300*5113495bSYour Name 
6301*5113495bSYour Name qdf_export_symbol(qdf_nbuf_get_timestamp_us);
6302*5113495bSYour Name 
/* Thin OS-abstraction wrapper: elapsed time since @buf's timestamp, in us */
uint64_t qdf_nbuf_get_timedelta_us(qdf_nbuf_t buf)
{
	return __qdf_nbuf_get_timedelta_us(buf);
}
6307*5113495bSYour Name 
6308*5113495bSYour Name qdf_export_symbol(qdf_nbuf_get_timedelta_us);
6309*5113495bSYour Name 
/* Thin OS-abstraction wrapper: elapsed time since @buf's timestamp, in ms */
uint64_t qdf_nbuf_get_timedelta_ms(qdf_nbuf_t buf)
{
	return __qdf_nbuf_get_timedelta_ms(buf);
}
6314*5113495bSYour Name 
6315*5113495bSYour Name qdf_export_symbol(qdf_nbuf_get_timedelta_ms);
6316*5113495bSYour Name 
/* Thin OS-abstraction wrapper: network-time delta for ktime value @t */
qdf_ktime_t qdf_nbuf_net_timedelta(qdf_ktime_t t)
{
	return __qdf_nbuf_net_timedelta(t);
}
6321*5113495bSYour Name 
6322*5113495bSYour Name qdf_export_symbol(qdf_nbuf_net_timedelta);
6323*5113495bSYour Name #endif
6324