/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_nbuf.h
 * This file provides OS dependent nbuf APIs.
 */

#ifndef _I_QDF_NBUF_H
#define _I_QDF_NBUF_H

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/version.h>
#include <asm/cacheflush.h>
#include <qdf_types.h>
#include <qdf_net_types.h>
#include <qdf_status.h>
#include <qdf_util.h>
#include <qdf_mem.h>
#include <linux/tcp.h>
#include <qdf_nbuf_frag.h>
#include "qdf_time.h"

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
/* Since commit
 * baebdf48c3600 ("net: dev: Makes sure netif_rx() can be invoked in any context.")
 *
 * the function netif_rx() can be used in preemptible/thread context as
 * well as in interrupt context.
 *
 * Use netif_rx().
 */
#define netif_rx_ni(skb) netif_rx(skb)
#endif

/*
 * Use the Linux socket buffer (sk_buff) as the underlying implementation.
 * Linux uses sk_buff to represent both the packet and its data, so an nbuf
 * maps directly onto an sk_buff.
 */
typedef struct sk_buff *__qdf_nbuf_t;

/*
 * typedef __qdf_nbuf_queue_head_t - abstraction for the Linux sk_buff_head
 * struct
 *
 * This is used for skb queue management via the Linux skb buff head APIs
 */
typedef struct sk_buff_head __qdf_nbuf_queue_head_t;

/*
 * typedef __qdf_nbuf_shared_info_t - abstraction for the Linux
 * skb_shared_info struct
 *
 * This is used for skb shared info via the Linux skb shinfo APIs
 */
typedef struct skb_shared_info *__qdf_nbuf_shared_info_t;

#define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1

#define QDF_SHINFO_SIZE SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

/* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
 * max tx fragments added by the driver
 * The driver will always add one tx fragment (the tx descriptor)
 */
#define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
#define QDF_NBUF_CB_PACKET_TYPE_EAPOL 1
#define QDF_NBUF_CB_PACKET_TYPE_ARP 2
#define QDF_NBUF_CB_PACKET_TYPE_WAPI 3
#define QDF_NBUF_CB_PACKET_TYPE_DHCP 4
#define QDF_NBUF_CB_PACKET_TYPE_ICMP 5
#define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6
#define QDF_NBUF_CB_PACKET_TYPE_DHCPV6 7
#define QDF_NBUF_CB_PACKET_TYPE_END_INDICATION 8
#define QDF_NBUF_CB_PACKET_TYPE_TCP_ACK 9

#define RADIOTAP_BASE_HEADER_LEN sizeof(struct ieee80211_radiotap_header)

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
#define IEEE80211_RADIOTAP_HE 23
#define IEEE80211_RADIOTAP_HE_MU 24
#endif

#define IEEE80211_RADIOTAP_HE_MU_OTHER 25

#define IEEE80211_RADIOTAP_EXT1_USIG 1
#define IEEE80211_RADIOTAP_EXT1_EHT 2

/* mark the first packet after wow wakeup */
#define QDF_MARK_FIRST_WAKEUP_PACKET 0x80000000

/* TCP Related MASK */
#define QDF_NBUF_PKT_TCPOP_FIN 0x01
#define QDF_NBUF_PKT_TCPOP_FIN_ACK 0x11
#define QDF_NBUF_PKT_TCPOP_RST 0x04

/*
 * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
 */
typedef union {
	uint64_t u64;
	qdf_dma_addr_t dma_addr;
} qdf_paddr_t;

typedef void (*qdf_nbuf_trace_update_t)(char *);
typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);

#define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)

#define __qdf_nbuf_mapped_paddr_set(skb, paddr) \
	(QDF_NBUF_CB_PADDR(skb) = paddr)

#define __qdf_nbuf_frag_push_head( \
	skb, frag_len, frag_vaddr, frag_paddr) \
	do { \
		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1; \
		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr; \
		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr; \
		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len; \
	} while (0)
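
/*
 * Illustrative sketch (not part of the original header): a driver that
 * prepends its HW tx descriptor as the single extra fragment could combine
 * the macro above with the per-fragment getters defined below; the "desc_*"
 * names are hypothetical.
 *
 *	__qdf_nbuf_frag_push_head(skb, desc_len, desc_vaddr, desc_paddr);
 *	// frag 0 now resolves to the descriptor, frag 1 to the skb data:
 *	qdf_dma_addr_t pa0 = __qdf_nbuf_get_frag_paddr(skb, 0);
 *	qdf_dma_addr_t pa1 = __qdf_nbuf_get_frag_paddr(skb, 1);
 */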

#define __qdf_nbuf_get_frag_vaddr(skb, frag_num) \
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ? \
	QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))

#define __qdf_nbuf_get_frag_vaddr_always(skb) \
	QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)

#define __qdf_nbuf_get_frag_paddr(skb, frag_num) \
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ? \
	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) : \
	/* assume that the OS only provides a single fragment */ \
	QDF_NBUF_CB_PADDR(skb))

#define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)

#define __qdf_nbuf_get_frag_len(skb, frag_num) \
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ? \
	QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)

#define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num) \
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) \
	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb)) \
	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))

#define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm) \
	do { \
		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) \
			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS; \
		if (frag_num) \
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = \
								is_wstrm; \
		else \
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = \
								is_wstrm; \
	} while (0)

#define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
	do { \
		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
	} while (0)

#define __qdf_nbuf_get_vdev_ctx(skb) \
	QDF_NBUF_CB_TX_VDEV_CTX((skb))

#define __qdf_nbuf_set_tx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_set_vdev_xmit_type(skb, type) \
	do { \
		QDF_NBUF_CB_PKT_XMIT_TYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_tx_ftype(skb) \
	QDF_NBUF_CB_TX_FTYPE((skb))

#define __qdf_nbuf_get_vdev_xmit_type(skb) \
	QDF_NBUF_CB_PKT_XMIT_TYPE((skb))

#define __qdf_nbuf_set_rx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_rx_ftype(skb) \
	QDF_NBUF_CB_RX_FTYPE((skb))

#define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_start(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))

#define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_rx_chfrag_cont(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_end(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))

#define __qdf_nbuf_set_da_mcbc(skb, val) \
	((QDF_NBUF_CB_RX_DA_MCBC((skb))) = val)

#define __qdf_nbuf_is_da_mcbc(skb) \
	(QDF_NBUF_CB_RX_DA_MCBC((skb)))

#define __qdf_nbuf_set_da_valid(skb, val) \
	((QDF_NBUF_CB_RX_DA_VALID((skb))) = val)

#define __qdf_nbuf_is_da_valid(skb) \
	(QDF_NBUF_CB_RX_DA_VALID((skb)))

#define __qdf_nbuf_set_sa_valid(skb, val) \
	((QDF_NBUF_CB_RX_SA_VALID((skb))) = val)

#define __qdf_nbuf_is_sa_valid(skb) \
	(QDF_NBUF_CB_RX_SA_VALID((skb)))

#define __qdf_nbuf_set_rx_retry_flag(skb, val) \
	((QDF_NBUF_CB_RX_RETRY_FLAG((skb))) = val)

#define __qdf_nbuf_is_rx_retry_flag(skb) \
	(QDF_NBUF_CB_RX_RETRY_FLAG((skb)))

#define __qdf_nbuf_set_raw_frame(skb, val) \
	((QDF_NBUF_CB_RX_RAW_FRAME((skb))) = val)

#define __qdf_nbuf_is_raw_frame(skb) \
	(QDF_NBUF_CB_RX_RAW_FRAME((skb)))

#define __qdf_nbuf_is_fr_ds_set(skb) \
	(QDF_NBUF_CB_RX_FROM_DS((skb)))

#define __qdf_nbuf_is_to_ds_set(skb) \
	(QDF_NBUF_CB_RX_TO_DS((skb)))

#define __qdf_nbuf_get_tid_val(skb) \
	(QDF_NBUF_CB_RX_TID_VAL((skb)))

#define __qdf_nbuf_set_tid_val(skb, val) \
	((QDF_NBUF_CB_RX_TID_VAL((skb))) = val)

#define __qdf_nbuf_set_is_frag(skb, val) \
	((QDF_NBUF_CB_RX_IS_FRAG((skb))) = val)

#define __qdf_nbuf_is_frag(skb) \
	(QDF_NBUF_CB_RX_IS_FRAG((skb)))

#define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_start(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))

#define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_tx_chfrag_cont(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_end(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))

#define __qdf_nbuf_trace_set_proto_type(skb, proto_type) \
	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))

#define __qdf_nbuf_trace_get_proto_type(skb) \
	QDF_NBUF_CB_TX_PROTO_TYPE(skb)

#define __qdf_nbuf_queue_walk_safe(queue, var, tvar) \
	skb_queue_walk_safe(queue, var, tvar)

/*
 * prototypes. Implemented in qdf_nbuf.c
 */

/**
 * __qdf_nbuf_alloc() - Allocate nbuf
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @reserve: headroom to start with
 * @align: Align
 * @prio: Priority
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * This allocates an nbuf, aligns it if needed, and reserves some space in the
 * front. Since the reserve is done after alignment, an unaligned reserve value
 * will result in an unaligned data address.
 *
 * Return: nbuf or %NULL if no memory
 */
__qdf_nbuf_t
__qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
		 int prio, const char *func, uint32_t line);
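
/*
 * Illustrative sketch (not from the original source): a typical allocation
 * of a 2 KB buffer with 64 bytes of headroom and 4-byte alignment; the
 * sizes are hypothetical.
 *
 *	__qdf_nbuf_t nbuf = __qdf_nbuf_alloc(osdev, 2048, 64, 4, 0,
 *					     __func__, __LINE__);
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;
 */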

__qdf_nbuf_t __qdf_nbuf_alloc_simple(__qdf_device_t osdev, size_t size,
				     const char *func, uint32_t line);

#if defined(QCA_DP_NBUF_FAST_PPEDS)
/**
 * __qdf_nbuf_alloc_ppe_ds() - Allocate nbuf
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * This allocates an nbuf for the wifi module in DS mode and uses the
 * __netdev_alloc_skb_no_skb_reset API. The netdev API invokes
 * skb_recycler_alloc with reset_skb as false. Hence, the recycler pool
 * will not do reset_struct when it allocates a DS used buffer to the DS
 * module, which helps to improve performance.
 *
 * Return: nbuf or %NULL if no memory
 */
__qdf_nbuf_t __qdf_nbuf_alloc_ppe_ds(__qdf_device_t osdev, size_t size,
				     const char *func, uint32_t line);
#endif /* QCA_DP_NBUF_FAST_PPEDS */

/**
 * __qdf_nbuf_frag_alloc() - Allocate nbuf via the page fragment allocator
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @reserve: headroom to start with
 * @align: Align
 * @prio: Priority
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * This allocates an nbuf, aligns it if needed, and reserves some space in the
 * front. Since the reserve is done after alignment, an unaligned reserve value
 * will result in an unaligned address.
 * It calls into the kernel page fragment APIs; keeping such scattered
 * allocations alive for a long time should be avoided.
 * This also brings a higher probability of page frag allocation failure in
 * low memory situations. In case of page frag allocation failure, it falls
 * back to a non-frag slab allocation.
 *
 * Return: nbuf or %NULL if no memory
 */
__qdf_nbuf_t
__qdf_nbuf_frag_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
		      int prio, const char *func, uint32_t line);

/**
 * __qdf_nbuf_alloc_no_recycler() - Allocate skb
 * @size: Size to be allocated for skb
 * @reserve: Reserve headroom size
 * @align: Align data
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * This API allocates an nbuf, aligns it if needed, and reserves some headroom
 * space after the alignment. The nbuf is not allocated from the skb recycler
 * pool.
 *
 * Return: Allocated nbuf pointer
 */
__qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
					  const char *func, uint32_t line);

/**
 * __qdf_nbuf_page_frag_alloc() - Allocate nbuf from the @pf_cache page
 * fragment cache
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @reserve: headroom to start with
 * @align: Align
 * @pf_cache: Reference to page fragment cache
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * This allocates an nbuf, aligns it if needed, and reserves some space in the
 * front. Since the reserve is done after alignment, an unaligned reserve value
 * will result in an unaligned address.
 *
 * It calls the kernel page fragment APIs to allocate skb->head; prefer
 * this API for buffers that are allocated and freed only once, i.e. for
 * reusable buffers.
 *
 * Return: nbuf or %NULL if no memory
 */
__qdf_nbuf_t
__qdf_nbuf_page_frag_alloc(__qdf_device_t osdev, size_t size, int reserve,
			   int align, __qdf_frag_cache_t *pf_cache,
			   const char *func, uint32_t line);

/**
 * __qdf_nbuf_clone() - clone the nbuf (the copy is read-only)
 * @nbuf: Pointer to network buffer
 *
 * If GFP_ATOMIC is overkill, we could instead check whether the caller is
 * in interrupt context and only then use it, falling back to GFP_KERNEL
 * in the normal case, e.g. via "in_irq() || irqs_disabled()".
 *
 * Return: cloned skb
 */
__qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t nbuf);
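
/*
 * Illustrative sketch of the context check suggested above (an assumption,
 * not the shipped implementation): pick the GFP flag based on whether the
 * caller can sleep.
 *
 *	gfp_t flags = (in_irq() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
 *	struct sk_buff *copy = skb_clone(skb, flags);
 */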

/**
 * __qdf_nbuf_free() - free the nbuf; it is interrupt safe
 * @skb: Pointer to network buffer
 *
 * Return: none
 */
void __qdf_nbuf_free(struct sk_buff *skb);

/**
 * __qdf_nbuf_map() - map a buffer to local bus address space
 * @osdev: OS device
 * @skb: Pointer to network buffer
 * @dir: Direction
 *
 * Return: QDF_STATUS
 */
QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
			  struct sk_buff *skb, qdf_dma_dir_t dir);

/**
 * __qdf_nbuf_unmap() - unmap a previously mapped buf
 * @osdev: OS device
 * @skb: Pointer to network buffer
 * @dir: dma direction
 *
 * Return: none
 */
void __qdf_nbuf_unmap(__qdf_device_t osdev,
		      struct sk_buff *skb, qdf_dma_dir_t dir);

/**
 * __qdf_nbuf_map_single() - map a single buffer to local bus address space
 * @osdev: OS device
 * @skb: Pointer to network buffer
 * @dir: Direction
 *
 * Return: QDF_STATUS
 */
QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
				 struct sk_buff *skb, qdf_dma_dir_t dir);

/**
 * __qdf_nbuf_unmap_single() - unmap a previously mapped buf
 * @osdev: OS device
 * @skb: Pointer to network buffer
 * @dir: Direction
 *
 * Return: none
 */
void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
			     struct sk_buff *skb, qdf_dma_dir_t dir);
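
/*
 * Illustrative sketch (not from the original source): the usual map/use/unmap
 * lifecycle around a hardware transfer; error handling is elided.
 *
 *	if (__qdf_nbuf_map_single(osdev, skb, QDF_DMA_TO_DEVICE) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 *	// hand __qdf_nbuf_mapped_paddr_get(skb) to the DMA engine ...
 *	__qdf_nbuf_unmap_single(osdev, skb, QDF_DMA_TO_DEVICE);
 */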

/**
 * __qdf_nbuf_reg_trace_cb() - register trace callback
 * @cb_func_ptr: Pointer to trace callback function
 *
 * Return: none
 */
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);

/**
 * __qdf_nbuf_reg_free_cb() - register nbuf free callback
 * @cb_func_ptr: function pointer to the nbuf free callback
 *
 * This function registers a callback function for nbuf free.
 *
 * Return: none
 */
void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);

/**
 * __qdf_nbuf_dmamap_create() - create a DMA map
 * @osdev: qdf device handle
 * @dmap: dma map handle
 *
 * This can later be used to map networking buffers. They:
 * - need space in adf_drv's software descriptor
 * - are typically created during adf_drv_create
 * - need to be created before any API (qdf_nbuf_map) that uses them
 *
 * Return: QDF STATUS
 */
QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);

/**
 * __qdf_nbuf_dmamap_destroy() - delete a dma map
 * @osdev: qdf device handle
 * @dmap: dma map handle
 *
 * Return: none
 */
void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);

/**
 * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
 * @dmap: dma map
 * @cb: callback
 * @arg: argument
 *
 * Return: none
 */
void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);

/**
 * __qdf_nbuf_map_nbytes() - map the given number of bytes of an nbuf
 * @osdev: os device
 * @skb: skb handle
 * @dir: dma direction
 * @nbytes: number of bytes to be mapped
 *
 * Return: QDF_STATUS
 */
QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
				 qdf_dma_dir_t dir, int nbytes);

/**
 * __qdf_nbuf_unmap_nbytes() - unmap a previously mapped buf
 * @osdev: OS device
 * @skb: skb handle
 * @dir: direction
 * @nbytes: number of bytes
 *
 * Return: none
 */
void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
			     qdf_dma_dir_t dir, int nbytes);

/**
 * __qdf_nbuf_sync_for_cpu() - nbuf sync
 * @osdev: os device
 * @skb: sk buff
 * @dir: direction
 *
 * Return: none
 */
void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
			     qdf_dma_dir_t dir);

/**
 * __qdf_nbuf_dma_map_info() - return the dma map info
 * @bmap: dma map
 * @sg: dma map info
 *
 * Return: none
 */
void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);

/**
 * __qdf_nbuf_get_frag_size() - get frag size
 * @nbuf: sk buffer
 * @cur_frag: current frag
 *
 * Return: frag size
 */
uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);

/**
 * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
 * specified by the index
 * @skb: sk buff
 * @sg: scatter/gather list of all the frags
 *
 * Return: none
 */
void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg);

/**
 * __qdf_nbuf_frag_map() - dma map frag
 * @osdev: os device
 * @nbuf: sk buff
 * @offset: offset
 * @dir: direction
 * @cur_frag: current fragment
 *
 * Return: QDF status
 */
QDF_STATUS __qdf_nbuf_frag_map(
	qdf_device_t osdev, __qdf_nbuf_t nbuf,
	int offset, qdf_dma_dir_t dir, int cur_frag);

/**
 * qdf_nbuf_classify_pkt() - classify packet
 * @skb: sk buff
 *
 * Return: none
 */
void qdf_nbuf_classify_pkt(struct sk_buff *skb);

/**
 * __qdf_nbuf_is_ipv4_wapi_pkt() - check if skb data is a wapi packet
 * @skb: Pointer to network buffer
 *
 * This API is for ipv4 packets.
 *
 * Return: true if packet is WAPI packet
 *         false otherwise.
 */
bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);

/**
 * __qdf_nbuf_is_ipv4_tdls_pkt() - check if skb data is a tdls packet
 * @skb: Pointer to network buffer
 *
 * This API is for ipv4 packets.
 *
 * Return: true if packet is tdls packet
 *         false otherwise.
 */
bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);

/**
 * __qdf_nbuf_data_is_ipv4_pkt() - check if packet is an ipv4 packet
 * @data: Pointer to network data
 *
 * This API is for Tx packets.
 *
 * Return: true if packet is ipv4 packet
 *         false otherwise
 */
bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);

/**
 * __qdf_nbuf_data_is_ipv6_pkt() - check if it is an IPV6 packet
 * @data: Pointer to IPV6 packet data buffer
 *
 * This function checks whether it is an IPV6 packet or not.
 *
 * Return: TRUE if it is an IPV6 packet
 *         FALSE if not
 */
bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);

/**
 * __qdf_nbuf_data_is_ipv4_mcast_pkt() - check if it is an IPV4 multicast packet
 * @data: Pointer to IPV4 packet data buffer
 *
 * This function checks whether it is an IPV4 multicast packet or not.
 *
 * Return: TRUE if it is an IPV4 multicast packet
 *         FALSE if not
 */
bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);

/**
 * __qdf_nbuf_data_is_ipv6_mcast_pkt() - check if it is an IPV6 multicast packet
 * @data: Pointer to IPV6 packet data buffer
 *
 * This function checks whether it is an IPV6 multicast packet or not.
 *
 * Return: TRUE if it is an IPV6 multicast packet
 *         FALSE if not
 */
bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);

/**
 * __qdf_nbuf_data_is_icmp_pkt() - check if it is an IPV4 ICMP packet
 * @data: Pointer to IPV4 ICMP packet data buffer
 *
 * This function checks whether it is an ICMP packet or not.
 *
 * Return: TRUE if it is an ICMP packet
 *         FALSE if not
 */
bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);

/**
 * __qdf_nbuf_data_is_icmpv6_pkt() - check if it is an IPV6 ICMPV6 packet
 * @data: Pointer to IPV6 ICMPV6 packet data buffer
 *
 * This function checks whether it is an ICMPV6 packet or not.
 *
 * Return: TRUE if it is an ICMPV6 packet
 *         FALSE if not
 */
bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);

/**
 * __qdf_nbuf_data_is_ipv4_udp_pkt() - check if it is an IPV4 UDP packet
 * @data: Pointer to IPV4 UDP packet data buffer
 *
 * This function checks whether it is an IPV4 UDP packet or not.
 *
 * Return: TRUE if it is an IPV4 UDP packet
 *         FALSE if not
 */
bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);

/**
 * __qdf_nbuf_data_is_ipv4_tcp_pkt() - check if it is an IPV4 TCP packet
 * @data: Pointer to IPV4 TCP packet data buffer
 *
 * This function checks whether it is an IPV4 TCP packet or not.
 *
 * Return: TRUE if it is an IPV4 TCP packet
 *         FALSE if not
 */
bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);

/**
 * __qdf_nbuf_data_is_ipv6_udp_pkt() - check if it is an IPV6 UDP packet
 * @data: Pointer to IPV6 UDP packet data buffer
 *
 * This function checks whether it is an IPV6 UDP packet or not.
 *
 * Return: TRUE if it is an IPV6 UDP packet
 *         FALSE if not
 */
bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);

/**
 * __qdf_nbuf_data_is_ipv6_tcp_pkt() - check if it is an IPV6 TCP packet
 * @data: Pointer to IPV6 TCP packet data buffer
 *
 * This function checks whether it is an IPV6 TCP packet or not.
 *
 * Return: TRUE if it is an IPV6 TCP packet
 *         FALSE if not
 */
bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);

/**
 * __qdf_nbuf_data_is_ipv4_dhcp_pkt() - check if skb data is a dhcp packet
 * @data: Pointer to network data buffer
 *
 * This API is for ipv4 packets.
 *
 * Return: true if packet is DHCP packet
 *         false otherwise
 */
bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);

/**
 * __qdf_nbuf_data_is_ipv6_dhcp_pkt() - check if skb data is a dhcp packet
 * @data: Pointer to network data buffer
 *
 * This API is for ipv6 packets.
 *
 * Return: true if packet is DHCP packet
 *         false otherwise
 */
bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);

/**
 * __qdf_nbuf_data_is_ipv6_mdns_pkt() - check if skb data is a mdns packet
 * @data: Pointer to network data buffer
 *
 * This API is for ipv6 packets.
 *
 * Return: true if packet is MDNS packet
 *         false otherwise
 */
bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data);

/**
 * __qdf_nbuf_data_is_ipv4_eapol_pkt() - check if skb data is an eapol packet
 * @data: Pointer to network data buffer
 *
 * This API is for ipv4 packets.
 *
 * Return: true if packet is EAPOL packet
 *         false otherwise.
 */
bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);

/**
 * __qdf_nbuf_data_is_ipv4_igmp_pkt() - check if skb data is an igmp packet
 * @data: Pointer to network data buffer
 *
 * This API is for ipv4 packets.
 *
 * Return: true if packet is igmp packet
 *         false otherwise.
 */
bool __qdf_nbuf_data_is_ipv4_igmp_pkt(uint8_t *data);

/**
 * __qdf_nbuf_data_is_ipv6_igmp_pkt() - check if skb data is an igmp packet
 * @data: Pointer to network data buffer
 *
 * This API is for ipv6 packets.
 *
 * Return: true if packet is igmp packet
 *         false otherwise.
 */
bool __qdf_nbuf_data_is_ipv6_igmp_pkt(uint8_t *data);

/**
 * __qdf_nbuf_is_ipv4_igmp_leave_pkt() - check if skb is an igmp leave packet
 * @buf: Pointer to network buffer
 *
 * This API is for ipv4 packets.
 *
 * Return: true if packet is an igmp leave packet
 *         false otherwise.
 */
bool __qdf_nbuf_is_ipv4_igmp_leave_pkt(__qdf_nbuf_t buf);

/**
 * __qdf_nbuf_is_ipv6_igmp_leave_pkt() - check if skb is an igmp leave packet
 * @buf: Pointer to network buffer
 *
 * This API is for ipv6 packets.
 *
 * Return: true if packet is an igmp leave packet
 *         false otherwise.
 */
bool __qdf_nbuf_is_ipv6_igmp_leave_pkt(__qdf_nbuf_t buf);

/**
 * __qdf_nbuf_data_is_ipv4_arp_pkt() - check if skb data is an arp packet
 * @data: Pointer to network data buffer
 *
 * This API is for ipv4 packets.
 *
 * Return: true if packet is ARP packet
 *         false otherwise.
 */
bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);

/**
 * __qdf_nbuf_is_bcast_pkt() - is destination address broadcast
 * @nbuf: sk buff
 *
 * Return: true if packet is broadcast
 *         false otherwise
 */
bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);

/**
 * __qdf_nbuf_is_mcast_replay() - is multicast replay packet
 * @nbuf: sk buff
 *
 * Return: true if packet is multicast replay
 *         false otherwise
 */
bool __qdf_nbuf_is_mcast_replay(__qdf_nbuf_t nbuf);

/**
 * __qdf_nbuf_is_arp_local() - check if local or non local arp
 * @skb: pointer to sk_buff
 *
 * Return: true if local arp or false otherwise.
 */
bool __qdf_nbuf_is_arp_local(struct sk_buff *skb);

/**
 * __qdf_nbuf_data_is_arp_req() - check if skb data is an arp request
 * @data: Pointer to network data buffer
 *
 * This API is for ipv4 packets.
 *
 * Return: true if packet is ARP request
 *         false otherwise.
 */
bool __qdf_nbuf_data_is_arp_req(uint8_t *data);

/**
 * __qdf_nbuf_data_is_arp_rsp() - check if skb data is an arp response
 * @data: Pointer to network data buffer
 *
 * This API is for ipv4 packets.
 *
 * Return: true if packet is ARP response
 *         false otherwise.
 */
bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);

/**
 * __qdf_nbuf_get_arp_src_ip() - get arp src IP
 * @data: Pointer to network data buffer
 *
 * This API is for ipv4 packets.
 *
 * Return: ARP packet source IP value.
 */
uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);

/**
 * __qdf_nbuf_get_arp_tgt_ip() - get arp target IP
 * @data: Pointer to network data buffer
 *
 * This API is for ipv4 packets.
 *
 * Return: ARP packet target IP value.
 */
uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
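
/*
 * Illustrative sketch (not from the original source): combining the ARP
 * helpers above to spot gratuitous ARPs, where the source and target IPs
 * match.
 *
 *	uint8_t *data = __qdf_nbuf_data(skb);
 *
 *	if (__qdf_nbuf_data_is_ipv4_arp_pkt(data) &&
 *	    __qdf_nbuf_data_is_arp_req(data) &&
 *	    __qdf_nbuf_get_arp_src_ip(data) == __qdf_nbuf_get_arp_tgt_ip(data))
 *		pr_debug("gratuitous ARP seen\n");
 */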

/**
 * __qdf_nbuf_get_dns_domain_name() - get dns domain name
 * @data: Pointer to network data buffer
 * @len: length to copy
 *
 * This API is for the dns domain name.
 *
 * Return: dns domain name.
 */
uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);

/**
 * __qdf_nbuf_data_is_dns_query() - check if skb data is a dns query
 * @data: Pointer to network data buffer
 *
 * This API is for dns query packets.
 *
 * Return: true if packet is dns query packet.
 *         false otherwise.
 */
bool __qdf_nbuf_data_is_dns_query(uint8_t *data);

/**
 * __qdf_nbuf_data_is_dns_response() - check if skb data is a dns response
 * @data: Pointer to network data buffer
 *
 * This API is for dns query responses.
 *
 * Return: true if packet is dns response packet.
 *         false otherwise.
 */
bool __qdf_nbuf_data_is_dns_response(uint8_t *data);

/**
 * __qdf_nbuf_data_is_tcp_fin() - check if skb data is a tcp fin
 * @data: Pointer to network data buffer
 *
 * This API checks if the packet is a tcp fin.
 *
 * Return: true if packet is tcp fin packet.
 *         false otherwise.
 */
bool __qdf_nbuf_data_is_tcp_fin(uint8_t *data);

/**
 * __qdf_nbuf_data_is_tcp_fin_ack() - check if skb data is a tcp fin ack
 * @data: Pointer to network data buffer
 *
 * This API checks if the tcp packet is a fin ack.
 *
 * Return: true if packet is tcp fin ack packet.
 *         false otherwise.
 */
bool __qdf_nbuf_data_is_tcp_fin_ack(uint8_t *data);

/**
 * __qdf_nbuf_data_is_tcp_syn() - check if skb data is a tcp syn
 * @data: Pointer to network data buffer
 *
 * This API is for tcp syn packets.
 *
 * Return: true if packet is tcp syn packet.
 *         false otherwise.
 */
bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);

/**
 * __qdf_nbuf_data_is_tcp_syn_ack() - check if skb data is a tcp syn ack
 * @data: Pointer to network data buffer
 *
 * This API is for tcp syn ack packets.
 *
 * Return: true if packet is tcp syn ack packet.
 *         false otherwise.
 */
bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);

/**
 * __qdf_nbuf_data_is_tcp_rst() - check if skb data is a tcp rst
 * @data: Pointer to network data buffer
 *
 * This API checks if the tcp packet is an rst.
 *
 * Return: true if packet is tcp rst packet.
 *         false otherwise.
 */
bool __qdf_nbuf_data_is_tcp_rst(uint8_t *data);

/**
 * __qdf_nbuf_data_is_tcp_ack() - check if skb data is a tcp ack
 * @data: Pointer to network data buffer
 *
 * This API is for tcp ack packets.
 *
 * Return: true if packet is tcp ack packet.
 *         false otherwise.
 */
bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);

/**
 * __qdf_nbuf_data_get_tcp_src_port() - get tcp src port
 * @data: Pointer to network data buffer
 *
 * This API is for tcp packets.
 *
 * Return: tcp source port value.
 */
uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);

/**
 * __qdf_nbuf_data_get_tcp_dst_port() - get tcp dst port
 * @data: Pointer to network data buffer
 *
 * This API is for tcp packets.
 *
 * Return: tcp destination port value.
 */
uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);

/**
 * __qdf_nbuf_data_is_icmpv4_req() - check if skb data is an icmpv4 request
 * @data: Pointer to network data buffer
 *
 * This API is for ICMPv4 request packets.
 *
 * Return: true if packet is icmpv4 request
 *         false otherwise.
 */
bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);

/**
 * __qdf_nbuf_data_is_icmpv4_redirect() - check if skb data is an icmpv4
 * redirect
 * @data: Pointer to network data buffer
 *
 * This API is for ICMPv4 redirect packets.
 *
 * Return: true if packet is icmpv4 redirect
 *         false otherwise.
 */
bool __qdf_nbuf_data_is_icmpv4_redirect(uint8_t *data);

/**
 * __qdf_nbuf_data_is_icmpv6_redirect() - check if skb data is an icmpv6
 * redirect
 * @data: Pointer to network data buffer
 *
 * This API is for ICMPv6 redirect packets.
 *
 * Return: true if packet is icmpv6 redirect
 *         false otherwise.
 */
bool __qdf_nbuf_data_is_icmpv6_redirect(uint8_t *data);

/**
 * __qdf_nbuf_data_is_icmpv4_rsp() - check if skb data is an icmpv4 response
 * @data: Pointer to network data buffer
 *
 * This API is for ICMPv4 response packets.
 *
 * Return: true if packet is icmpv4 response
 *         false otherwise.
 */
bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);

/**
 * __qdf_nbuf_get_icmpv4_src_ip() - get icmpv4 src IP
 * @data: Pointer to network data buffer
 *
 * This API is for ipv4 packets.
 *
 * Return: icmpv4 packet source IP value.
 */
uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);

/**
 * __qdf_nbuf_get_icmpv4_tgt_ip() - get icmpv4 target IP
 * @data: Pointer to network data buffer
 *
 * This API is for ipv4 packets.
 *
 * Return: icmpv4 packet target IP value.
 */
uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);

/**
 * __qdf_nbuf_data_get_dhcp_subtype() - get the subtype of a DHCP packet
 * @data: Pointer to DHCP packet data buffer
 *
 * This function returns the subtype of the DHCP packet.
 *
 * Return: subtype of the DHCP packet.
 */
enum qdf_proto_subtype __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);

/**
 * __qdf_nbuf_data_get_eapol_subtype() - get the subtype of an EAPOL packet
 * @data: Pointer to EAPOL packet data buffer
 *
 * This function returns the subtype of the EAPOL packet.
 *
 * Return: subtype of the EAPOL packet.
 */
enum qdf_proto_subtype __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);

/**
 * __qdf_nbuf_data_get_arp_subtype() - get the subtype of an ARP packet
 * @data: Pointer to ARP packet data buffer
 *
 * This function returns the subtype of the ARP packet.
 *
 * Return: subtype of the ARP packet.
 */
enum qdf_proto_subtype __qdf_nbuf_data_get_arp_subtype(uint8_t *data);

/**
 * __qdf_nbuf_data_get_icmp_subtype() - get the subtype of an IPV4 ICMP packet
 * @data: Pointer to IPV4 ICMP packet data buffer
 *
 * This function returns the subtype of the ICMP packet.
 *
 * Return: subtype of the ICMP packet.
 */
enum qdf_proto_subtype __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);

/**
 * __qdf_nbuf_data_get_icmpv6_subtype() - get the subtype of an IPV6 ICMPV6
 * packet
 * @data: Pointer to IPV6 ICMPV6 packet data buffer
 *
 * This function returns the subtype of the ICMPV6 packet.
 *
 * Return: subtype of the ICMPV6 packet.
 */
enum qdf_proto_subtype __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);

/**
 * __qdf_nbuf_data_get_ipv4_proto() - get the proto type of an IPV4 packet
 * @data: Pointer to IPV4 packet data buffer
 *
 * This function returns the proto type of the IPV4 packet.
 *
 * Return: proto type of IPV4 packet.
 */
uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);

/**
 * __qdf_nbuf_data_get_ipv6_proto() - get the proto type of an IPV6 packet
 * @data: Pointer to IPV6 packet data buffer
 *
 * This function returns the proto type of the IPV6 packet.
 *
 * Return: proto type of IPV6 packet.
 */
uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);

/**
 * __qdf_nbuf_data_get_ipv4_tos() - get the TOS type of an IPv4 packet
 * @data: Pointer to skb payload
 *
 * This function returns the TOS type of the IPv4 packet.
 *
 * Return: TOS type of IPv4 packet.
 */
uint8_t __qdf_nbuf_data_get_ipv4_tos(uint8_t *data);

/**
 * __qdf_nbuf_data_get_ipv6_tc() - get the TC field of an IPv6 packet
 * @data: Pointer to IPv6 packet data buffer
 *
 * This function returns the TC field of the IPv6 packet.
 *
 * Return: traffic classification of IPv6 packet.
 */
uint8_t __qdf_nbuf_data_get_ipv6_tc(uint8_t *data);

/**
 * __qdf_nbuf_data_set_ipv4_tos() - set the TOS for an IPv4 packet
 * @data: pointer to skb payload
 * @tos: value of TOS to be set
 *
 * This function sets the TOS field of the IPv4 packet.
 *
 * Return: None
 */
void __qdf_nbuf_data_set_ipv4_tos(uint8_t *data, uint8_t tos);

/**
 * __qdf_nbuf_data_set_ipv6_tc() - set the TC field of an IPv6 packet
 * @data: Pointer to skb payload
 * @tc: value to set to IPv6 header TC field
 *
 * This function sets the TC field of the IPv6 header.
 *
 * Return: None
 */
void __qdf_nbuf_data_set_ipv6_tc(uint8_t *data, uint8_t tc);

/**
 * __qdf_nbuf_is_ipv4_last_fragment() - Check if IPv4 packet is last fragment
 * @skb: Buffer
 *
 * This function checks whether the IPv4 packet is the last fragment or not.
 * The caller has to call this function for IPv4 packets only.
 *
 * Return: True if IPv4 packet is last fragment otherwise false
 */
bool __qdf_nbuf_is_ipv4_last_fragment(struct sk_buff *skb);

/**
 * __qdf_nbuf_is_ipv4_fragment() - Check if IPv4 packet is a fragment
 * @skb: Buffer
 *
 * This function checks whether the IPv4 packet is a fragment or not.
 * The caller has to call this function for IPv4 packets only.
 *
 * Return: True if IPv4 packet is a fragment otherwise false
 */
bool __qdf_nbuf_is_ipv4_fragment(struct sk_buff *skb);

/**
 * __qdf_nbuf_is_ipv4_v6_pure_tcp_ack() - check if the skb is a pure TCP ACK
 * (TCP ACK with no payload), for IPv4 or IPv6
 * @skb: Pointer to network buffer
 *
 * Return: true if packet is a pure TCP ACK
 *         false otherwise.
 */
bool __qdf_nbuf_is_ipv4_v6_pure_tcp_ack(struct sk_buff *skb);

#ifdef QDF_NBUF_GLOBAL_COUNT
/**
 * __qdf_nbuf_count_get() - get nbuf global count
 *
 * Return: nbuf global count
 */
int __qdf_nbuf_count_get(void);

/**
 * __qdf_nbuf_count_inc() - increment nbuf global count
 * @nbuf: sk buff
 *
 * Return: void
 */
void __qdf_nbuf_count_inc(struct sk_buff *nbuf);

/**
 * __qdf_nbuf_count_dec() - decrement nbuf global count
 * @nbuf: sk buff
 *
 * Return: void
 */
void __qdf_nbuf_count_dec(struct sk_buff *nbuf);

/**
 * __qdf_nbuf_mod_init() - initialization routine for qdf_nbuf
 *
 * Return: void
 */
void __qdf_nbuf_mod_init(void);

/**
 * __qdf_nbuf_mod_exit() - uninitialization routine for qdf_nbuf
 *
 * Return: void
 */
void __qdf_nbuf_mod_exit(void);

#else

static inline int __qdf_nbuf_count_get(void)
{
	return 0;
}

static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_mod_init(void)
{
}

static inline void __qdf_nbuf_mod_exit(void)
{
}
#endif

/**
 * __qdf_to_status() - OS to QDF status conversion
 * @error : OS error
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_to_status(signed int error)
{
	switch (error) {
	case 0:
		return QDF_STATUS_SUCCESS;
	case ENOMEM:
	case -ENOMEM:
		return QDF_STATUS_E_NOMEM;
	default:
		return QDF_STATUS_E_NOSUPPORT;
	}
}

/**
 * __qdf_nbuf_cat() - link two nbufs
 * @dst: Buffer to piggyback into
 * @src: Buffer to put
 *
 * Concat two nbufs: the new buf (@src) is piggybacked into the older one
 * (@dst). It is the caller's responsibility to free @src.
 *
 * Return: QDF_STATUS (status of the call)
 */
static inline QDF_STATUS
__qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
{
	QDF_STATUS error = 0;

	qdf_assert(dst && src);

	/*
	 * Since pskb_expand_head unconditionally reallocates the skb->head
	 * buffer, first check whether the current buffer is already large
	 * enough.
	 */
	if (skb_tailroom(dst) < src->len) {
		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
		if (error)
			return __qdf_to_status(error);
	}

	memcpy(skb_tail_pointer(dst), src->data, src->len);
	skb_put(dst, src->len);
	return __qdf_to_status(error);
}
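
/*
 * Illustrative sketch (not from the original source): concatenating two
 * buffers; on success the caller still owns @src and must free it.
 *
 *	if (__qdf_nbuf_cat(dst, src) == QDF_STATUS_SUCCESS)
 *		__qdf_nbuf_free(src);
 */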

/*
 * nbuf manipulation routines
 */

/**
 * __qdf_nbuf_headroom() - return the amount of head space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of head room
 */
static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
{
	return skb_headroom(skb);
}

/**
 * __qdf_nbuf_tailroom() - return the amount of tail space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of tail room
 */
static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
{
	return skb_tailroom(skb);
}

/**
 * __qdf_nbuf_put_tail() - puts data at the end
 * @skb: Pointer to network buffer
 * @size: size to be pushed
 *
 * Return: data pointer of this buf where new data has to be
 *         put, or NULL if there is not enough room in this buf.
 */
static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
{
	if (skb_tailroom(skb) < size) {
		if (unlikely(pskb_expand_head(skb, 0,
			size - skb_tailroom(skb), GFP_ATOMIC))) {
			__qdf_nbuf_count_dec(skb);
			dev_kfree_skb_any(skb);
			return NULL;
		}
	}
	return skb_put(skb, size);
}
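
/*
 * Illustrative sketch (not from the original source): appending a trailer;
 * "trailer" is a hypothetical struct. Note that on expansion failure
 * __qdf_nbuf_put_tail() frees the skb itself.
 *
 *	uint8_t *tail = __qdf_nbuf_put_tail(skb, sizeof(trailer));
 *
 *	if (!tail)
 *		return QDF_STATUS_E_NOMEM;	// skb already freed
 *	qdf_mem_copy(tail, &trailer, sizeof(trailer));
 */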

/**
 * __qdf_nbuf_trim_tail() - trim data out from the end
 * @skb: Pointer to network buffer
 * @size: size to be popped
 *
 * Return: none
 */
static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
{
	return skb_trim(skb, skb->len - size);
}

/*
 * prototypes. Implemented in qdf_nbuf.c
 */

/**
 * __qdf_nbuf_get_tx_cksum() - get tx checksum
 * @skb: Pointer to network buffer
 *
 * Return: TX checksum value
 */
qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);

/**
 * __qdf_nbuf_set_rx_cksum() - set rx checksum
 * @skb: Pointer to network buffer
 * @cksum: Pointer to checksum value
 *
 * Return: QDF_STATUS
 */
QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
				   qdf_nbuf_rx_cksum_t *cksum);

/**
 * __qdf_nbuf_get_tid() - get tid
 * @skb: Pointer to network buffer
 *
 * Return: tid
 */
uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);

/**
 * __qdf_nbuf_set_tid() - set tid
 * @skb: Pointer to network buffer
 * @tid: TID value to set
 *
 * Return: none
 */
void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);

/**
 * __qdf_nbuf_get_exemption_type() - get exemption type
 * @skb: Pointer to network buffer
 *
 * Return: exemption type
 */
uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);

/**
 * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free
 * @skb: sk_buff handle
 *
 * Return: none
 */
void __qdf_nbuf_ref(struct sk_buff *skb);

/**
 * __qdf_nbuf_shared() - Check whether the buffer is shared
 * @skb: sk_buff buffer
 *
 * Return: true if more than one person has a reference to this buffer.
 */
int __qdf_nbuf_shared(struct sk_buff *skb);

/**
 * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
 * @skb: sk buff
 *
 * Return: number of fragments
 */
static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
{
	return skb_shinfo(skb)->nr_frags;
}

/**
 * __qdf_nbuf_get_nr_frags_in_fraglist() - return the number of fragments
 * @skb: sk buff
 *
 * This API returns the total number of fragments from the fraglist.
 *
 * Return: total number of fragments
 */
static inline uint32_t __qdf_nbuf_get_nr_frags_in_fraglist(struct sk_buff *skb)
{
	uint32_t num_frag = 0;
	struct sk_buff *list = NULL;

	num_frag = skb_shinfo(skb)->nr_frags;
	skb_walk_frags(skb, list)
		num_frag += skb_shinfo(list)->nr_frags;

	return num_frag;
}

/*
 * qdf_nbuf_pool_delete() implementation - do nothing in linux
 */
#define __qdf_nbuf_pool_delete(osdev)

/**
 * __qdf_nbuf_copy() - returns a private copy of the skb
 * @skb: Pointer to network buffer
 *
 * This API returns a private copy of the skb; the skb returned is completely
 * modifiable by callers.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_copy(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}
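
/*
 * Illustrative sketch (not from the original source): use __qdf_nbuf_copy()
 * rather than __qdf_nbuf_clone() when the payload must be written, since a
 * clone shares (read-only) data with the original.
 *
 *	struct sk_buff *writable = __qdf_nbuf_copy(skb);
 *
 *	if (writable)
 *		writable->data[0] ^= 0x1;	// safe: private copy
 */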

#define __qdf_nbuf_reserve skb_reserve

/**
 * __qdf_nbuf_set_data_pointer() - set buffer data pointer
 * @skb: Pointer to network buffer
 * @data: data pointer
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_data_pointer(struct sk_buff *skb, uint8_t *data)
{
	skb->data = data;
}

/**
 * __qdf_nbuf_set_len() - set buffer data length
 * @skb: Pointer to network buffer
 * @len: data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_len(struct sk_buff *skb, uint32_t len)
{
	skb->len = len;
}

/**
 * __qdf_nbuf_set_tail_pointer() - set buffer data tail pointer
 * @skb: Pointer to network buffer
 * @len: skb data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_tail_pointer(struct sk_buff *skb, int len)
{
	skb_set_tail_pointer(skb, len);
}

/**
 * __qdf_nbuf_unlink_no_lock() - unlink an skb from skb queue
 * @skb: Pointer to network buffer
 * @list: list to use
 *
 * This is a lockless version; the driver must acquire locks if it
 * needs to synchronize.
 *
 * Return: none
 */
static inline void
__qdf_nbuf_unlink_no_lock(struct sk_buff *skb, struct sk_buff_head *list)
{
	__skb_unlink(skb, list);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
/**
 * __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network
 * buffer in kernel
 *
 * Return: true if dev_scratch is supported
 *         false if dev_scratch is not supported
 */
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return true;
}

/**
 * __qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer
 * @skb: Pointer to network buffer
 *
 * Return: dev_scratch if dev_scratch supported
 *         0 if dev_scratch not supported
 */
static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return skb->dev_scratch;
}

/**
 * __qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer
 * @skb: Pointer to network buffer
 * @value: value to be set in dev_scratch of network buffer
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
	skb->dev_scratch = value;
}
#else
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return false;
}

static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return 0;
}

static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
}
#endif /* KERNEL_VERSION(4, 14, 0) */
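
/*
 * Illustrative sketch (not from the original source): stashing a cookie in
 * dev_scratch only when the kernel supports it.
 *
 *	if (__qdf_nbuf_is_dev_scratch_supported())
 *		__qdf_nbuf_set_dev_scratch(skb, cookie);
 *	// later: cookie = __qdf_nbuf_get_dev_scratch(skb); (0 if unsupported)
 */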

/**
 * __qdf_nbuf_head() - return the skb's head pointer
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to head buffer
 */
static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
{
	return skb->head;
}

/**
 * __qdf_nbuf_data() - return the pointer to data header in the skb
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to skb data
 */
static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
{
	return skb->data;
}

/**
 * __qdf_nbuf_data_addr() - return the address of the skb's data pointer
 * @skb: Pointer to network buffer
 *
 * Return: address of the skb->data pointer itself
 */
static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
{
	return (uint8_t *)&skb->data;
}

/**
 * __qdf_nbuf_get_protocol() - return the protocol value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb protocol
 */
static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
{
	return skb->protocol;
}

/**
 * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb ip_summed
 */
static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
{
	return skb->ip_summed;
}

/**
 * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
 * @skb: Pointer to network buffer
 * @ip_summed: ip checksum
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
					    uint8_t ip_summed)
{
	skb->ip_summed = ip_summed;
}

/**
 * __qdf_nbuf_get_priority() - return the priority value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb priority
 */
static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
{
	return skb->priority;
}

/**
 * __qdf_nbuf_set_priority() - sets the priority value of the skb
 * @skb: Pointer to network buffer
 * @p: priority
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
{
	skb->priority = p;
}

/**
 * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * This fn is used to link up extensions to the head skb. Does not handle
 * linking to the head
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
 * @skb_head: head_buf nbuf holding head segment (single)
 * @ext_list: nbuf list holding linked extensions to the head
 * @ext_len: Total length of all buffers in the extension list
 *
 * This function is used to link up a list of packet extensions (seg1, 2, ...)
 * to the nbuf holding the head segment (seg0)
 *
 * Return: none
 */
static inline void
__qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
			   struct sk_buff *ext_list, size_t ext_len)
{
	skb_shinfo(skb_head)->frag_list = ext_list;
	skb_head->data_len += ext_len;
	skb_head->len += ext_len;
}
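
/*
 * Illustrative sketch (not from the original source): chaining two extension
 * segments to a head segment, e.g. while reassembling a jumbo frame.
 *
 *	__qdf_nbuf_set_next_ext(seg1, seg2);
 *	__qdf_nbuf_append_ext_list(head, seg1, seg1->len + seg2->len);
 *	// head->len and head->data_len now account for both extensions
 */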

/**
 * __qdf_nbuf_get_shinfo() - return the shared info of the skb
 * @head_buf: Pointer to network buffer
 *
 * Return: skb shared info from head buf
 */
static inline
struct skb_shared_info *__qdf_nbuf_get_shinfo(struct sk_buff *head_buf)
{
	return skb_shinfo(head_buf);
}

/**
 * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
 * @head_buf: Network buf holding head segment (single)
 *
 * This ext_list is populated when we have a jumbo packet, for example in the
 * case of monitor mode amsdu packet reception, and the segments are stitched
 * together using the frags_list.
 *
 * Return: Network buf list holding linked extensions from head buf.
 */
static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
{
	return (skb_shinfo(head_buf)->frag_list);
}

/**
 * __qdf_nbuf_get_age() - return the checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: checksum value
 */
static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
{
	return skb->csum;
}

/**
 * __qdf_nbuf_set_age() - sets the checksum value of the skb
 * @skb: Pointer to network buffer
 * @v: Value
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
{
	skb->csum = v;
}

/**
 * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
 * @skb: Pointer to network buffer
 * @adj: Adjustment value
 *
 * Return: none
 */
static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
{
	skb->csum -= adj;
}

/**
 * __qdf_nbuf_copy_bits() - copy @len bytes from @offset in the skb to @to
 * @skb: Pointer to network buffer
 * @offset: Offset value
 * @len: Length
 * @to: Destination pointer
 *
 * Return: 0 on success, negative error code on failure
 */
static inline int32_t
__qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
{
	return skb_copy_bits(skb, offset, to, len);
}
1892
1893 /**
1894 * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
1895 * @skb: Pointer to network buffer
1896 * @len: Packet length
1897 *
1898 * Return: none
1899 */
__qdf_nbuf_set_pktlen(struct sk_buff * skb,uint32_t len)1900 static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
1901 {
1902 if (skb->len > len) {
1903 skb_trim(skb, len);
1904 } else {
1905 if (skb_tailroom(skb) < len - skb->len) {
1906 if (unlikely(pskb_expand_head(skb, 0,
1907 len - skb->len - skb_tailroom(skb),
1908 GFP_ATOMIC))) {
1909 QDF_DEBUG_PANIC(
1910 "SKB tailroom is lessthan requested length."
1911 " tail-room: %u, len: %u, skb->len: %u",
1912 skb_tailroom(skb), len, skb->len);
1913 __qdf_nbuf_count_dec(skb);
1914 dev_kfree_skb_any(skb);
1915 }
1916 }
1917 skb_put(skb, (len - skb->len));
1918 }
1919 }
1920
/**
 * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
 * @skb: Pointer to network buffer
 * @protocol: Protocol type
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
{
	skb->protocol = protocol;
}

#define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))

#define __qdf_nbuf_get_tx_htt2_frm(skb) \
	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)

/**
 * __qdf_dmaaddr_to_32s() - return high and low parts of dma_addr
 * @dmaaddr: DMA address
 * @lo: low 32-bits of @dmaaddr
 * @hi: high 32-bits of @dmaaddr
 *
 * Returns the high and low 32-bits of the DMA addr in the provided ptrs
 *
 * Return: N/A
 */
void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
			  uint32_t *lo, uint32_t *hi);

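/*
 * Worked example (illustrative; assumes the conventional split of a
 * 64-bit DMA address into its lower and upper 32 bits):
 *
 *	uint32_t lo, hi;
 *
 *	__qdf_dmaaddr_to_32s(0x0000001234abcd00ULL, &lo, &hi);
 *	// lo == 0x34abcd00, hi == 0x00000012
 */
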
/**
 * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
 * into segments
 * @osdev: qdf device handle
 * @skb: network buffer to be segmented
 * @tso_info: This is the output. The information about the
 * TSO segments will be populated within this.
 *
 * This function fragments a TCP jumbo packet into smaller
 * segments to be transmitted by the driver. It chains the TSO
 * segments created into a list.
 *
 * Return: number of TSO segments
 */
uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
				 struct qdf_tso_info_t *tso_info);

/**
 * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
 * @osdev: qdf device handle
 * @tso_seg: TSO segment element to be unmapped
 * @is_last_seg: whether this is the last tso segment or not
 *
 * Return: none
 */
void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
				  struct qdf_tso_seg_elem_t *tso_seg,
				  bool is_last_seg);

#ifdef FEATURE_TSO
/**
 * __qdf_nbuf_get_tcp_payload_len() - function to return the tcp
 * payload len
 * @skb: buffer
 *
 * Return: size
 */
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb);

/**
 * __qdf_nbuf_get_tso_num_seg() - function to calculate the number
 * of TSO segments
 * @skb: network buffer to be segmented
 *
 * This function counts the TSO segments a TCP jumbo packet will be
 * split into, without performing the actual segmentation.
 *
 * Return: number of segments
 */
uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);

#else
static inline
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
{
	return 0;
}

static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	return 0;
}

#endif /* FEATURE_TSO */

static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
{
	if (skb_is_gso(skb) &&
	    (skb_is_gso_v6(skb) ||
	     (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
		return true;
	else
		return false;
}

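/*
 * Minimal usage sketch: gate segmentation on __qdf_nbuf_is_tso().
 * tx_send_single() is a hypothetical driver routine shown only for
 * illustration.
 *
 *	if (__qdf_nbuf_is_tso(skb))
 *		num_seg = __qdf_nbuf_get_tso_num_seg(skb);
 *	else
 *		tx_send_single(skb);
 */
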
struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);

int __qdf_nbuf_get_users(struct sk_buff *skb);

/**
 * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
 * and get hw_classify by peeking
 * into the packet
 * @skb: Network buffer (skb on Linux)
 * @pkt_type: Pkt type (from enum htt_pkt_type)
 * @pkt_subtype: Bit 4 of this field in the HTT descriptor
 * needs to be set in case of CE classification support;
 * it is set by this macro.
 * @hw_classify: This is a flag which is set to indicate
 * CE classification is enabled.
 * Do not set this bit for VLAN packets
 * OR for mcast / bcast frames.
 *
 * This macro parses the payload to figure out relevant Tx meta-data, e.g.
 * whether to enable the tx_classify bit in CE.
 *
 * Overrides pkt_type only if required for 802.3 frames (original ethernet).
 * If the protocol field is less than ETH_P_802_3_MIN (0x600), it is a
 * length and the frame is 802.3; else it is Ethernet Type II (RFC 894).
 * Bit 4 in pkt_subtype is the tx_classify bit.
 *
 * Return: void
 */
#define __qdf_nbuf_tx_info_get(skb, pkt_type, \
			       pkt_subtype, hw_classify) \
do { \
	struct ethhdr *eh = (struct ethhdr *)skb->data; \
	uint16_t ether_type = ntohs(eh->h_proto); \
	bool is_mc_bc; \
	\
	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) || \
		   is_multicast_ether_addr((uint8_t *)eh); \
	\
	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) { \
		hw_classify = 1; \
		pkt_subtype = 0x01 << HTT_TX_CLASSIFY_BIT_S; \
	} \
	\
	if (unlikely(ether_type < ETH_P_802_3_MIN)) \
		pkt_type = htt_pkt_type_ethernet; \
	\
} while (0)

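/*
 * Usage sketch (hypothetical caller): pkt_type, pkt_subtype and
 * hw_classify must be existing lvalues; the macro only overwrites them
 * when the frame qualifies. The initial values below are placeholders.
 *
 *	uint8_t pkt_subtype = 0, hw_classify = 0;
 *	enum htt_pkt_type pkt_type = htt_pkt_type_ethernet;
 *
 *	__qdf_nbuf_tx_info_get(skb, pkt_type, pkt_subtype, hw_classify);
 */
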
/*
 * nbuf private buffer routines
 */

/**
 * __qdf_nbuf_peek_header() - return the header's addr & m_len
 * @skb: Pointer to network buffer
 * @addr: Pointer to store the header's address
 * @len: Pointer to store the network buffer length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
{
	*addr = skb->data;
	*len = skb->len;
}

/**
 * typedef __qdf_nbuf_queue_t - network buffer queue
 * @head: Head pointer
 * @tail: Tail pointer
 * @qlen: Queue length
 */
typedef struct __qdf_nbuf_qhead {
	struct sk_buff *head;
	struct sk_buff *tail;
	unsigned int qlen;
} __qdf_nbuf_queue_t;

/****************** Functions ******************/

/**
 * __qdf_nbuf_queue_init() - initialize the queue head
 * @qhead: Queue head
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
{
	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
	return QDF_STATUS_SUCCESS;
}

/**
 * __qdf_nbuf_queue_add() - add an skb at the tail of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version; the driver must acquire locks if it
 * needs to synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
{
	skb->next = NULL; /* Nullify the next ptr */

	if (!qhead->head)
		qhead->head = skb;
	else
		qhead->tail->next = skb;

	qhead->tail = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_append() - Append src list at the end of dest list
 * @dest: target netbuf queue
 * @src: source netbuf queue
 *
 * Return: target netbuf queue
 */
static inline __qdf_nbuf_queue_t *
__qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
{
	if (!dest)
		return NULL;
	else if (!src || !(src->head))
		return dest;

	if (!(dest->head))
		dest->head = src->head;
	else
		dest->tail->next = src->head;

	dest->tail = src->tail;
	dest->qlen += src->qlen;
	return dest;
}

/**
 * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version; the driver must acquire locks if it needs to
 * synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
{
	if (!qhead->head) {
		/* Empty queue: the tail pointer must also be updated */
		qhead->tail = skb;
	}
	skb->next = qhead->head;
	qhead->head = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_remove_last() - remove an skb from the tail of the queue
 * @qhead: Queue head
 *
 * This is a lockless version; the driver should take care of the locks.
 * Since the queue is singly linked, this walks the list from the head to
 * find the new tail.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_queue_remove_last(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t tmp_tail, node = NULL;

	if (qhead->head) {
		qhead->qlen--;
		tmp_tail = qhead->tail;
		node = qhead->head;
		if (qhead->head == qhead->tail) {
			qhead->head = NULL;
			qhead->tail = NULL;
			return node;
		} else {
			while (tmp_tail != node->next)
				node = node->next;
			qhead->tail = node;
			return node->next;
		}
	}
	return node;
}

/**
 * __qdf_nbuf_queue_remove() - remove an skb from the head of the queue
 * @qhead: Queue head
 *
 * This is a lockless version; the driver should take care of the locks
 *
 * Return: skb or NULL
 */
static inline
struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t tmp = NULL;

	if (qhead->head) {
		qhead->qlen--;
		tmp = qhead->head;
		if (qhead->head == qhead->tail) {
			qhead->head = NULL;
			qhead->tail = NULL;
		} else {
			qhead->head = tmp->next;
		}
		tmp->next = NULL;
	}
	return tmp;
}

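/*
 * Lockless queue usage sketch: the caller provides any serialization.
 * q, skb1, skb2 and process() are hypothetical names used only here.
 *
 *	__qdf_nbuf_queue_t q;
 *	struct sk_buff *skb;
 *
 *	__qdf_nbuf_queue_init(&q);
 *	__qdf_nbuf_queue_add(&q, skb1);
 *	__qdf_nbuf_queue_add(&q, skb2);
 *	while ((skb = __qdf_nbuf_queue_remove(&q)) != NULL)
 *		process(skb);
 */
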
/**
 * __qdf_nbuf_queue_first() - returns the first skb in the queue
 * @qhead: head of queue
 *
 * Return: NULL if the queue is empty
 */
static inline struct sk_buff *
__qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
{
	return qhead->head;
}

/**
 * __qdf_nbuf_queue_last() - returns the last skb in the queue
 * @qhead: head of queue
 *
 * Return: NULL if the queue is empty
 */
static inline struct sk_buff *
__qdf_nbuf_queue_last(__qdf_nbuf_queue_t *qhead)
{
	return qhead->tail;
}

/**
 * __qdf_nbuf_queue_len() - return the queue length
 * @qhead: Queue head
 *
 * Return: Queue length
 */
static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
{
	return qhead->qlen;
}

/**
 * __qdf_nbuf_queue_next() - return the next skb from the packet chain
 * @skb: Pointer to network buffer
 *
 * This API returns the next skb from the packet chain; note that the skb
 * is still in the queue
 *
 * Return: NULL if no packets are there
 */
static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
 * @qhead: Queue head
 *
 * Return: true if length is 0 else false
 */
static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
{
	return qhead->qlen == 0;
}

/*
 * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
 * Because the queue head will most likely be put in some structure,
 * we don't use a pointer type as the definition.
 */

/**
 * __qdf_nbuf_set_send_complete_flag() - no-op on Linux
 * @skb: Pointer to network buffer
 * @flag: Send-complete flag (ignored)
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
{
}

/**
 * __qdf_nbuf_realloc_headroom() - expand the headroom of the data region
 * while keeping the skb shell intact
 * @skb: sk buff
 * @headroom: size of headroom
 *
 * In case of failure the skb is released.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
{
	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
		__qdf_nbuf_count_dec(skb);
		dev_kfree_skb_any(skb);
		skb = NULL;
	}
	return skb;
}

/**
 * __qdf_nbuf_realloc_tailroom() - expand the tailroom of the data region
 * while keeping the skb shell intact
 * @skb: sk buff
 * @tailroom: size of tailroom
 *
 * In case of failure the skb is released.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
{
	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
		return skb;
	/* unlikely path */
	__qdf_nbuf_count_dec(skb);
	dev_kfree_skb_any(skb);
	return NULL;
}

/**
 * __qdf_nbuf_linearize() - skb linearize
 * @skb: sk buff
 *
 * If the nbuf is non-linear, pull all of its fragment data into the
 * linear data area so the contents can be accessed contiguously.
 *
 * Return: 0 on success, -ENOMEM on failure
 */
static inline int
__qdf_nbuf_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}

/**
 * __qdf_nbuf_unshare() - skb unshare
 * @skb: sk buff
 *
 * Create a version of the specified nbuf whose contents can be safely
 * modified without affecting other users. If the nbuf is a clone, this
 * function creates a new copy of the data. If the buffer is not a clone,
 * the original buffer is returned.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_unshare(struct sk_buff *skb)
{
	struct sk_buff *skb_new;

	__qdf_frag_count_dec(__qdf_nbuf_get_nr_frags(skb));

	skb_new = skb_unshare(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_frag_count_inc(__qdf_nbuf_get_nr_frags(skb_new));

	return skb_new;
}

/**
 * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
{
	return skb_cloned(skb);
}

/**
 * __qdf_nbuf_pool_init() - init pool
 * @net: net handle
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
{
	return QDF_STATUS_SUCCESS;
}

/*
 * __qdf_nbuf_pool_delete() implementation - do nothing on Linux
 */
#define __qdf_nbuf_pool_delete(osdev)

/**
 * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
 * release the skb.
 * @skb: sk buff
 * @headroom: size of headroom
 * @tailroom: size of tailroom
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
{
	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
		return skb;

	__qdf_nbuf_count_dec(skb);
	dev_kfree_skb_any(skb);
	return NULL;
}

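/*
 * Usage sketch: reserve extra head space before prepending a header.
 * Note that on failure the original skb has already been freed, so the
 * caller must not touch it. hdr_len is a hypothetical constant.
 *
 *	skb = __qdf_nbuf_expand(skb, hdr_len, 0);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 *	skb_push(skb, hdr_len);
 */
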
/**
 * __qdf_nbuf_copy_expand() - copy and expand nbuf
 * @buf: Network buf instance
 * @headroom: Additional headroom to be added
 * @tailroom: Additional tailroom to be added
 *
 * Return: New nbuf that is a copy of buf, with additional head and tailroom,
 * or NULL if there is no memory
 */
static inline struct sk_buff *
__qdf_nbuf_copy_expand(struct sk_buff *buf, int headroom, int tailroom)
{
	struct sk_buff *copy;

	copy = skb_copy_expand(buf, headroom, tailroom, GFP_ATOMIC);
	if (copy)
		__qdf_nbuf_count_inc(copy);

	return copy;
}

/**
 * __qdf_nbuf_has_fraglist() - check whether buf has a frag_list
 * @buf: Network buf instance
 *
 * Return: True if buf has a frag_list, else False
 */
static inline bool
__qdf_nbuf_has_fraglist(struct sk_buff *buf)
{
	return skb_has_frag_list(buf);
}

/**
 * __qdf_nbuf_get_last_frag_list_nbuf() - Get last frag_list nbuf
 * @buf: Network buf instance
 *
 * Return: Network buf instance
 */
static inline struct sk_buff *
__qdf_nbuf_get_last_frag_list_nbuf(struct sk_buff *buf)
{
	struct sk_buff *list;

	if (!__qdf_nbuf_has_fraglist(buf))
		return NULL;

	for (list = skb_shinfo(buf)->frag_list; list->next; list = list->next)
		;

	return list;
}

/**
 * __qdf_nbuf_get_ref_fraglist() - take a reference on each frag_list buffer
 * @buf: Network buf instance
 *
 * Return: void
 */
static inline void
__qdf_nbuf_get_ref_fraglist(struct sk_buff *buf)
{
	struct sk_buff *list;

	skb_walk_frags(buf, list)
		skb_get(list);
}

/**
 * __qdf_nbuf_tx_cksum_info() - tx checksum info
 * @skb: Network buffer
 * @hdr_off: unused
 * @where: unused
 *
 * Not implemented for this OS abstraction; always asserts.
 *
 * Return: false
 */
static inline bool
__qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
			 uint8_t **where)
{
	qdf_assert(0);
	return false;
}

/**
 * __qdf_nbuf_reset_ctxt() - mem zero control block
 * @nbuf: buffer
 *
 * Return: none
 */
static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
{
	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
}

/**
 * __qdf_nbuf_network_header() - get network header
 * @buf: buffer
 *
 * Return: network header pointer
 */
static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
{
	return skb_network_header(buf);
}

/**
 * __qdf_nbuf_transport_header() - get transport header
 * @buf: buffer
 *
 * Return: transport header pointer
 */
static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
{
	return skb_transport_header(buf);
}

/**
 * __qdf_nbuf_tcp_tso_size() - return the TCP maximum segment size (MSS)
 * passed to the driver by the network stack as part of the network buffer
 * @skb: sk buff
 *
 * Return: TCP MSS size
 */
static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/**
 * __qdf_nbuf_init() - Re-initializes the skb for re-use
 * @nbuf: sk buff
 *
 * Return: none
 */
void __qdf_nbuf_init(__qdf_nbuf_t nbuf);

/**
 * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
 * @nbuf: sk buff
 *
 * Return: void ptr
 */
static inline void *
__qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
{
	return (void *)nbuf->cb;
}

/**
 * __qdf_nbuf_headlen() - return the length of the linear buffer of the skb
 * @skb: sk buff
 *
 * Return: head size
 */
static inline size_t
__qdf_nbuf_headlen(struct sk_buff *skb)
{
	return skb_headlen(skb);
}

/**
 * __qdf_nbuf_tso_tcp_v4() - check whether the TSO TCP packet is IPv4
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0;
}

/**
 * __qdf_nbuf_tso_tcp_v6() - check whether the TSO TCP packet is IPv6
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0;
}

/**
 * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 header length of the skb
 * @skb: sk buff
 *
 * Return: size of l2+l3+l4 header length
 */
static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
{
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

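/*
 * Worked example: if skb->data points at the Ethernet header of an
 * Ethernet II + IPv4 + TCP frame with no IP or TCP options,
 * skb_transport_offset() is 14 + 20 = 34 bytes and tcp_hdrlen() is
 * 20 bytes, so __qdf_nbuf_l2l3l4_hdr_len() returns 54.
 */
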
/**
 * __qdf_nbuf_get_tcp_hdr_len() - return the TCP header length of the skb
 * @skb: sk buff
 *
 * Return: size of TCP header length
 */
static inline size_t __qdf_nbuf_get_tcp_hdr_len(struct sk_buff *skb)
{
	return tcp_hdrlen(skb);
}

/**
 * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return true;
	else
		return false;
}

/**
 * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
 * @skb: sk buff
 *
 * Return: TCP sequence number
 */
static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
{
	return ntohl(tcp_hdr(skb)->seq);
}

/**
 * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's
 * private space
 * @skb: sk buff
 *
 * Return: data pointer to typecast into your priv structure
 */
static inline char *
__qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
{
	return &skb->cb[8];
}

/**
 * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
 * @buf: Pointer to nbuf
 *
 * Return: None
 */
static inline void
__qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
{
	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
}

/**
 * __qdf_nbuf_record_rx_queue() - set rx queue in skb
 * @skb: sk buff
 * @queue_id: Queue id
 *
 * Return: void
 */
static inline void
__qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
{
	skb_record_rx_queue(skb, queue_id);
}

/**
 * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by the
 * linux kernel
 * @skb: sk buff
 *
 * Return: Queue mapping
 */
static inline uint16_t
__qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
{
	return skb->queue_mapping;
}

/**
 * __qdf_nbuf_set_queue_mapping() - set the queue mapping in the skb
 * @skb: sk buff
 * @val: queue_id
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_queue_mapping(struct sk_buff *skb, uint16_t val)
{
	skb_set_queue_mapping(skb, val);
}

/**
 * __qdf_nbuf_set_timestamp() - set the timestamp for the frame
 * @skb: sk buff
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_timestamp(struct sk_buff *skb)
{
	__net_timestamp(skb);
}

/**
 * __qdf_nbuf_get_timestamp() - get the timestamp for the frame
 * @skb: sk buff
 *
 * Return: timestamp stored in skb in ms
 */
static inline uint64_t
__qdf_nbuf_get_timestamp(struct sk_buff *skb)
{
	return ktime_to_ms(skb_get_ktime(skb));
}

/**
 * __qdf_nbuf_get_timestamp_us() - get the timestamp for the frame
 * @skb: sk buff
 *
 * Return: timestamp stored in skb in us
 */
static inline uint64_t
__qdf_nbuf_get_timestamp_us(struct sk_buff *skb)
{
	return ktime_to_us(skb_get_ktime(skb));
}

/**
 * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
 * @skb: sk buff
 *
 * Return: time difference in ms
 */
static inline uint64_t
__qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
{
	return ktime_to_ms(net_timedelta(skb->tstamp));
}

/**
 * __qdf_nbuf_get_timedelta_us() - get time difference in microseconds
 * @skb: sk buff
 *
 * Return: time difference in microseconds
 */
static inline uint64_t
__qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
{
	return ktime_to_us(net_timedelta(skb->tstamp));
}

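/*
 * Latency measurement sketch: stamp the frame on ingress and read the
 * delta when it is finally handed off. queuing_delay_ms is a
 * hypothetical variable used only for this example.
 *
 *	__qdf_nbuf_set_timestamp(skb);
 *	... (frame sits on a queue) ...
 *	queuing_delay_ms = __qdf_nbuf_get_timedelta_ms(skb);
 */
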
/**
 * __qdf_nbuf_orphan() - orphan a nbuf
 * @skb: sk buff
 *
 * If a buffer currently has an owner then we call the
 * owner's destructor function
 *
 * Return: void
 */
static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
{
	skb_orphan(skb);
}

/**
 * __qdf_nbuf_get_end_offset() - Return the size of the nbuf from
 * head pointer to end pointer
 * @nbuf: qdf_nbuf_t
 *
 * Return: size of network buffer from head pointer to end
 * pointer
 */
static inline unsigned int __qdf_nbuf_get_end_offset(__qdf_nbuf_t nbuf)
{
	return skb_end_offset(nbuf);
}

/**
 * __qdf_nbuf_get_truesize() - Return the true size of the nbuf
 * including the header and variable data area
 * @skb: sk buff
 *
 * Return: size of network buffer
 */
static inline unsigned int __qdf_nbuf_get_truesize(struct sk_buff *skb)
{
	return skb->truesize;
}

/**
 * __qdf_nbuf_get_allocsize() - Return the actual size of the skb->head
 * excluding the header and variable data area
 * @skb: sk buff
 *
 * Return: actual allocated size of network buffer
 */
static inline unsigned int __qdf_nbuf_get_allocsize(struct sk_buff *skb)
{
	return SKB_WITH_OVERHEAD(skb->truesize) -
		SKB_DATA_ALIGN(sizeof(struct sk_buff));
}

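/*
 * Expansion of the arithmetic above (using the usual kernel macro
 * definitions, which can vary between versions):
 *
 *	allocsize = skb->truesize
 *		  - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *		  - SKB_DATA_ALIGN(sizeof(struct sk_buff));
 *
 * i.e. the bytes actually available at skb->head once both the sk_buff
 * metadata and the trailing skb_shared_info are subtracted out.
 */
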
#ifdef CONFIG_WLAN_SYSFS_MEM_STATS
/**
 * __qdf_record_nbuf_nbytes() - add or subtract the size of the nbuf
 * from the total skb mem and DP tx/rx skb mem
 * @nbytes: number of bytes
 * @dir: direction
 * @is_mapped: is mapped or unmapped memory
 *
 * Return: none
 */
static inline void __qdf_record_nbuf_nbytes(
	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
{
	if (is_mapped) {
		if (dir == QDF_DMA_TO_DEVICE) {
			qdf_mem_dp_tx_skb_cnt_inc();
			qdf_mem_dp_tx_skb_inc(nbytes);
		} else if (dir == QDF_DMA_FROM_DEVICE) {
			qdf_mem_dp_rx_skb_cnt_inc();
			qdf_mem_dp_rx_skb_inc(nbytes);
		}
		qdf_mem_skb_total_inc(nbytes);
	} else {
		if (dir == QDF_DMA_TO_DEVICE) {
			qdf_mem_dp_tx_skb_cnt_dec();
			qdf_mem_dp_tx_skb_dec(nbytes);
		} else if (dir == QDF_DMA_FROM_DEVICE) {
			qdf_mem_dp_rx_skb_cnt_dec();
			qdf_mem_dp_rx_skb_dec(nbytes);
		}
		qdf_mem_skb_total_dec(nbytes);
	}
}

#else /* CONFIG_WLAN_SYSFS_MEM_STATS */
static inline void __qdf_record_nbuf_nbytes(
	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
{
}
#endif /* CONFIG_WLAN_SYSFS_MEM_STATS */

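/*
 * Pairing sketch: account the same byte count at map and unmap time so
 * the sysfs totals stay balanced. Using the end offset as the size
 * metric is just one plausible choice for this illustration.
 *
 *	__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(skb),
 *				 QDF_DMA_TO_DEVICE, true);   // on map
 *	...
 *	__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(skb),
 *				 QDF_DMA_TO_DEVICE, false);  // on unmap
 */
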
/* Thin wrappers over the Linux sk_buff_head queue primitives */
static inline struct sk_buff *
__qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
{
	return skb_dequeue(skb_queue_head);
}

static inline
uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
{
	return skb_queue_head->qlen;
}

static inline
void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
					struct sk_buff *skb)
{
	skb_queue_tail(skb_queue_head, skb);
}

static inline
void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
{
	skb_queue_head_init(skb_queue_head);
}

static inline
void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
{
	skb_queue_purge(skb_queue_head);
}

static inline
int __qdf_nbuf_queue_empty(__qdf_nbuf_queue_head_t *nbuf_queue_head)
{
	return skb_queue_empty(nbuf_queue_head);
}

/**
 * __qdf_nbuf_queue_head_lock() - Acquire the skb list lock
 * @skb_queue_head: skb list for which the lock is to be acquired
 *
 * Return: void
 */
static inline
void __qdf_nbuf_queue_head_lock(struct sk_buff_head *skb_queue_head)
{
	spin_lock_bh(&skb_queue_head->lock);
}

/**
 * __qdf_nbuf_queue_head_unlock() - Release the skb list lock
 * @skb_queue_head: skb list for which the lock is to be released
 *
 * Return: void
 */
static inline
void __qdf_nbuf_queue_head_unlock(struct sk_buff_head *skb_queue_head)
{
	spin_unlock_bh(&skb_queue_head->lock);
}

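/*
 * Locked access sketch: take the list lock around non-atomic operations
 * such as __qdf_nbuf_dev_queue_head() (defined below), which uses the
 * unlocked __skb_queue_head() internally. q is a hypothetical
 * struct sk_buff_head.
 *
 *	__qdf_nbuf_queue_head_lock(&q);
 *	__qdf_nbuf_dev_queue_head(&q, skb);
 *	__qdf_nbuf_queue_head_unlock(&q);
 */
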
/**
 * __qdf_nbuf_get_frag_size_by_idx() - Get nbuf frag size at index idx
 * @nbuf: qdf_nbuf_t
 * @idx: Index for which frag size is requested
 *
 * Return: Frag size
 */
static inline unsigned int __qdf_nbuf_get_frag_size_by_idx(__qdf_nbuf_t nbuf,
							    uint8_t idx)
{
	unsigned int size = 0;

	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
		size = skb_frag_size(&skb_shinfo(nbuf)->frags[idx]);
	return size;
}

/**
 * __qdf_nbuf_get_frag_addr() - Get nbuf frag address at index idx
 * @nbuf: qdf_nbuf_t
 * @idx: Index for which frag address is requested
 *
 * Return: Frag address on success, else NULL
 */
static inline __qdf_frag_t __qdf_nbuf_get_frag_addr(__qdf_nbuf_t nbuf,
						    uint8_t idx)
{
	__qdf_frag_t frag_addr = NULL;

	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
		frag_addr = skb_frag_address(&skb_shinfo(nbuf)->frags[idx]);
	return frag_addr;
}

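/*
 * Iteration sketch over the attached page fragments; consume() is a
 * hypothetical per-fragment handler. Indices at or beyond
 * __QDF_NBUF_MAX_FRAGS yield NULL/0 from the accessors above.
 *
 *	uint8_t i, nr = skb_shinfo(nbuf)->nr_frags;
 *
 *	for (i = 0; i < nr; i++)
 *		consume(__qdf_nbuf_get_frag_addr(nbuf, i),
 *			__qdf_nbuf_get_frag_size_by_idx(nbuf, i));
 */
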
/**
 * __qdf_nbuf_trim_add_frag_size() - Increase/Decrease frag_size by size
 * @nbuf: qdf_nbuf_t
 * @idx: Frag index
 * @size: Size by which frag_size needs to be increased/decreased
 * (+ve means increase, -ve means decrease)
 * @truesize: truesize
 *
 * Return: none
 */
static inline void __qdf_nbuf_trim_add_frag_size(__qdf_nbuf_t nbuf, uint8_t idx,
						 int size,
						 unsigned int truesize)
{
	skb_coalesce_rx_frag(nbuf, idx, size, truesize);
}

/**
 * __qdf_nbuf_move_frag_page_offset() - Move frag page_offset by size
 * and adjust length by size.
 * @nbuf: qdf_nbuf_t
 * @idx: Frag index
 * @offset: Offset by which the frag page offset should be moved
 * (+ve moves the offset forward, -ve moves it backward)
 *
 * Return: QDF_STATUS
 */
QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
					    int offset);

/**
 * __qdf_nbuf_remove_frag() - Remove frag from nbuf
 * @nbuf: nbuf pointer
 * @idx: index of the frag to be removed
 * @truesize: truesize of frag
 *
 * Return: void
 */
void __qdf_nbuf_remove_frag(__qdf_nbuf_t nbuf, uint16_t idx, uint16_t truesize);

/**
 * __qdf_nbuf_add_rx_frag() - Add frag to nbuf at nr_frag index
 * @buf: Frag pointer to be added to the nbuf frags
 * @nbuf: qdf_nbuf_t where the frag will be added
 * @offset: Offset in frag to be added to nbuf frags
 * @frag_len: Frag length
 * @truesize: truesize
 * @take_frag_ref: Whether to take a ref for the frag or not
 * This bool must be set as per the below conditions:
 * 1. False: If this frag is being added to an nbuf
 * for the first time after allocation.
 * 2. True: If the frag is already attached as part of
 * another nbuf.
 *
 * It takes a reference count based on the boolean flag take_frag_ref
 *
 * Return: none
 */
void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
			    int offset, int frag_len,
			    unsigned int truesize, bool take_frag_ref);

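/*
 * Reference-count sketch: pass take_frag_ref = false only the first time
 * a freshly allocated frag is attached; pass true when attaching a frag
 * that another nbuf already owns. rx_frag, rx_len, nbuf1 and nbuf2 are
 * hypothetical, and PAGE_SIZE as truesize is just an assumption here.
 *
 *	// first attachment of a new frag: no extra page ref
 *	__qdf_nbuf_add_rx_frag(rx_frag, nbuf1, 0, rx_len, PAGE_SIZE, false);
 *	// sharing the same frag with a second nbuf: take a page ref
 *	__qdf_nbuf_add_rx_frag(rx_frag, nbuf2, 0, rx_len, PAGE_SIZE, true);
 */
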
/**
 * __qdf_nbuf_ref_frag() - get frag reference
 * @buf: Pointer to frag
 *
 * Return: void
 */
void __qdf_nbuf_ref_frag(qdf_frag_t buf);

/**
 * __qdf_nbuf_set_mark() - Set nbuf mark
 * @buf: Pointer to nbuf
 * @mark: Value to set mark
 *
 * Return: None
 */
static inline void __qdf_nbuf_set_mark(__qdf_nbuf_t buf, uint32_t mark)
{
	buf->mark = mark;
}

/**
 * __qdf_nbuf_get_mark() - Get nbuf mark
 * @buf: Pointer to nbuf
 *
 * Return: Value of mark
 */
static inline uint32_t __qdf_nbuf_get_mark(__qdf_nbuf_t buf)
{
	return buf->mark;
}

/**
 * __qdf_nbuf_get_data_len() - Return the size of the nbuf from
 * the data pointer to the end pointer
 * @nbuf: qdf_nbuf_t
 *
 * Return: size of skb from data pointer to end pointer
 */
static inline qdf_size_t __qdf_nbuf_get_data_len(__qdf_nbuf_t nbuf)
{
	return (skb_end_pointer(nbuf) - nbuf->data);
}

/**
 * __qdf_nbuf_set_data_len() - Set the data_len of the nbuf
 * @nbuf: qdf_nbuf_t
 * @len: data_len to be set
 *
 * Return: value of data_len
 */
static inline
qdf_size_t __qdf_nbuf_set_data_len(__qdf_nbuf_t nbuf, uint32_t len)
{
	return nbuf->data_len = len;
}

/**
 * __qdf_nbuf_get_only_data_len() - Return the data_len of the nbuf
 * @nbuf: qdf_nbuf_t
 *
 * Return: value of data_len
 */
static inline qdf_size_t __qdf_nbuf_get_only_data_len(__qdf_nbuf_t nbuf)
{
	return nbuf->data_len;
}

/**
 * __qdf_nbuf_set_hash() - set the hash of the buf
 * @buf: Network buf instance
 * @len: hash value to be set
 *
 * Return: None
 */
static inline void __qdf_nbuf_set_hash(__qdf_nbuf_t buf, uint32_t len)
{
	buf->hash = len;
}

/**
 * __qdf_nbuf_set_sw_hash() - set the sw hash of the buf
 * @buf: Network buf instance
 * @len: sw hash value to be set
 *
 * Return: None
 */
static inline void __qdf_nbuf_set_sw_hash(__qdf_nbuf_t buf, uint32_t len)
{
	buf->sw_hash = len;
}

/**
 * __qdf_nbuf_set_csum_start() - set the csum start of the buf
 * @buf: Network buf instance
 * @len: csum start value to be set
 *
 * Return: None
 */
static inline void __qdf_nbuf_set_csum_start(__qdf_nbuf_t buf, uint16_t len)
{
	buf->csum_start = len;
}

/**
 * __qdf_nbuf_set_csum_offset() - set the csum offset of the buf
 * @buf: Network buf instance
 * @len: csum offset value to be set
 *
 * Return: None
 */
static inline void __qdf_nbuf_set_csum_offset(__qdf_nbuf_t buf, uint16_t len)
{
	buf->csum_offset = len;
}

/**
 * __qdf_nbuf_get_gso_segs() - Return the number of gso segments
 * @skb: Pointer to network buffer
 *
 * Return: number of gso segments
 */
static inline uint16_t __qdf_nbuf_get_gso_segs(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs;
}

/**
 * __qdf_nbuf_set_gso_segs() - set the number of gso segments
 * @skb: Pointer to network buffer
 * @val: val to be set
 *
 * Return: None
 */
static inline void __qdf_nbuf_set_gso_segs(struct sk_buff *skb, uint16_t val)
{
	skb_shinfo(skb)->gso_segs = val;
}

/**
 * __qdf_nbuf_set_gso_type_udp_l4() - set the gso type to GSO UDP L4
 * @skb: Pointer to network buffer
 *
 * Return: None
 */
static inline void __qdf_nbuf_set_gso_type_udp_l4(struct sk_buff *skb)
{
	skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
}

/**
 * __qdf_nbuf_set_ip_summed_partial() - set the ip summed to CHECKSUM_PARTIAL
 * @skb: Pointer to network buffer
 *
 * Return: None
 */
static inline void __qdf_nbuf_set_ip_summed_partial(struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
}

/**
 * __qdf_nbuf_get_gso_size() - Return the gso size
 * @skb: Pointer to network buffer
 *
 * Return: gso size
 */
static inline unsigned int __qdf_nbuf_get_gso_size(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/**
 * __qdf_nbuf_set_gso_size() - Set the gso size in nbuf
 * @skb: Pointer to network buffer
 * @val: the gso size to be set
 *
 * Return: None
 */
static inline void
__qdf_nbuf_set_gso_size(struct sk_buff *skb, unsigned int val)
{
	skb_shinfo(skb)->gso_size = val;
}

/**
 * __qdf_nbuf_kfree() - Free nbuf using kfree
 * @skb: Pointer to network buffer
 *
 * This function is called to free the skb on failure cases
 *
 * Return: None
 */
static inline void __qdf_nbuf_kfree(struct sk_buff *skb)
{
	kfree_skb(skb);
}

/**
 * __qdf_nbuf_dev_kfree_list() - Free nbuf list using dev based os call
 * @nbuf_queue_head: Pointer to nbuf queue head
 *
 * This function is called to free the nbuf list on failure cases
 *
 * Return: None
 */
void
__qdf_nbuf_dev_kfree_list(__qdf_nbuf_queue_head_t *nbuf_queue_head);

/**
 * __qdf_nbuf_dev_queue_head() - queue a buffer using dev at the list head
 * @nbuf_queue_head: Pointer to skb list head
 * @buff: Pointer to nbuf
 *
 * This function is called to queue a buffer at the skb list head
 *
 * Return: None
 */
static inline void
__qdf_nbuf_dev_queue_head(__qdf_nbuf_queue_head_t *nbuf_queue_head,
			  __qdf_nbuf_t buff)
{
	__skb_queue_head(nbuf_queue_head, buff);
}

/**
 * __qdf_nbuf_dev_kfree() - Free nbuf using dev based os call
 * @skb: Pointer to network buffer
 *
 * This function is called to free the skb on failure cases
 *
 * Return: None
 */
static inline void __qdf_nbuf_dev_kfree(struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}

/**
 * __qdf_nbuf_pkt_type_is_mcast() - check if skb pkt type is mcast
 * @skb: Network buffer
 *
 * Return: TRUE if skb pkt type is mcast
 *         FALSE if not
 */
static inline
bool __qdf_nbuf_pkt_type_is_mcast(struct sk_buff *skb)
{
	return skb->pkt_type == PACKET_MULTICAST;
}

/**
 * __qdf_nbuf_pkt_type_is_bcast() - check if skb pkt type is bcast
 * @skb: Network buffer
 *
 * Return: TRUE if skb pkt type is bcast
 *         FALSE if not
 */
static inline
bool __qdf_nbuf_pkt_type_is_bcast(struct sk_buff *skb)
{
	return skb->pkt_type == PACKET_BROADCAST;
}

/**
 * __qdf_nbuf_set_dev() - set dev of network buffer
 * @skb: Pointer to network buffer
 * @dev: value to be set in dev of network buffer
 *
 * Return: void
 */
static inline
void __qdf_nbuf_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
}

/**
 * __qdf_nbuf_get_dev_mtu() - get dev mtu in n/w buffer
 * @skb: Pointer to network buffer
 *
 * Return: dev mtu value in nbuf
 */
static inline
unsigned int __qdf_nbuf_get_dev_mtu(struct sk_buff *skb)
{
	return skb->dev->mtu;
}

/**
 * __qdf_nbuf_set_protocol_eth_type_trans() - set protocol using the
 * eth_type_trans os API
 * @skb: Pointer to network buffer
 *
 * Return: None
 */
static inline
void __qdf_nbuf_set_protocol_eth_type_trans(struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, skb->dev);
}

/**
 * __qdf_nbuf_net_timedelta() - get time delta
 * @t: time as __qdf_ktime_t object
 *
 * Return: time delta as qdf_ktime_t object
 */
static inline qdf_ktime_t __qdf_nbuf_net_timedelta(qdf_ktime_t t)
{
	return net_timedelta(t);
}

#ifdef CONFIG_NBUF_AP_PLATFORM
#include <i_qdf_nbuf_w.h>
#else
#include <i_qdf_nbuf_m.h>
#endif
#endif /* _I_QDF_NBUF_H */