/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_nbuf_m.h
 *
 * This file provides platform-specific nbuf APIs.
 * Included by i_qdf_nbuf.h and should not be included
 * directly from other files.
 */

#ifndef _I_QDF_NBUF_M_H
#define _I_QDF_NBUF_M_H
/**
 * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
 * - data passed between layers of the driver.
 *
 * Notes:
 * 1. Hard limited to 48 bytes. Please count your bytes
 * 2. The size of this structure has to be easily calculable and
 *    consistent; do not use any conditional compile flags
 * 3. Split into a common part followed by a tx/rx overlay
 * 4. There is only one extra frag, which represents the HTC/HTT header
 * 5. "ext_cb_ptr" must be the first member in both TX and RX unions
 *    for priv_cb_w, since it must be at the same offset in both the
 *    TX and RX unions
 * 6. The "ipa.owned" bit must be the first member in both TX and RX
 *    unions for priv_cb_m, since it must be at the same offset in both
 *    the TX and RX unions.
 *
 * @paddr: physical address retrieved by dma_map of nbuf->data
 * @u: union of rx and tx data
 * @u.rx: rx data
 * @u.rx.dev: union of priv_cb_w and priv_cb_m
 *
 * @u.rx.dev.priv_cb_w:
 * @u.rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 * @u.rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @u.rx.dev.priv_cb_w.msdu_len: length of RX packet
 * @u.rx.dev.priv_cb_w.flag_intra_bss: flag to indicate this is an intra-BSS
 *                                     packet
 * @u.rx.dev.priv_cb_w.ipa_smmu_map: do IPA smmu map
 * @u.rx.dev.priv_cb_w.peer_id: peer_id for RX packet
 * @u.rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet
 *                                   type
 * @u.rx.dev.priv_cb_w.flow_tag: flow tag set by application for 5-tuples rcvd
 *
 * @u.rx.dev.priv_cb_m:
 * @u.rx.dev.priv_cb_m.ipa.owned: packet owned by IPA
 * @u.rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer
 * @u.rx.dev.priv_cb_m.flush_ind: flush indication
 * @u.rx.dev.priv_cb_m.packet_buf_pool: packet buffer pool flag
 * @u.rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset
 * @u.rx.dev.priv_cb_m.exc_frm: exception frame
 * @u.rx.dev.priv_cb_m.ipa_smmu_map: do IPA smmu map
 * @u.rx.dev.priv_cb_m.reo_dest_ind_or_sw_excpt: reo destination indication or
 *                                               sw exception bit from ring desc
 * @u.rx.dev.priv_cb_m.lmac_id: lmac id for RX packet
 * @u.rx.dev.priv_cb_m.fr_ds: from DS bit in RX packet
 * @u.rx.dev.priv_cb_m.to_ds: to DS bit in RX packet
 * @u.rx.dev.priv_cb_m.logical_link_id: link id of RX packet
 * @u.rx.dev.priv_cb_m.band: band for RX packet
 * @u.rx.dev.priv_cb_m.reserved1: reserved bits
 * @u.rx.dev.priv_cb_m.dp_ext: Union of tcp and ext structs
 * @u.rx.dev.priv_cb_m.dp_ext.tcp: TCP structs
 * @u.rx.dev.priv_cb_m.dp_ext.tcp.tcp_seq_num: TCP sequence number
 * @u.rx.dev.priv_cb_m.dp_ext.tcp.tcp_ack_num: TCP ACK number
 * @u.rx.dev.priv_cb_m.dp_ext.ext: Extension struct for other usage
 * @u.rx.dev.priv_cb_m.dp_ext.ext.mpdu_seq: wifi MPDU sequence number
 * @u.rx.dev.priv_cb_m.dp: Union of wifi3 and wifi2 structs
 * @u.rx.dev.priv_cb_m.dp.wifi3: wifi3 data
 * @u.rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
 * @u.rx.dev.priv_cb_m.dp.wifi3.peer_id: peer_id for RX packet
 * @u.rx.dev.priv_cb_m.dp.wifi2: wifi2 data
 * @u.rx.dev.priv_cb_m.dp.wifi2.map_index:
 * @u.rx.dev.priv_cb_m.lro_ctx: LRO context
 *
 * @u.rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
 * @u.rx.tcp_proto: L4 protocol is TCP
 * @u.rx.tcp_pure_ack: A TCP ACK packet with no payload
 * @u.rx.ipv6_proto: L3 protocol is IPV6
 * @u.rx.ip_offset: offset to IP header
 * @u.rx.tcp_offset: offset to TCP header
 * @u.rx.rx_ctx_id: Rx context id
 * @u.rx.fcs_err: FCS error
 * @u.rx.is_raw_frame: RAW frame
 * @u.rx.num_elements_in_list: number of elements in the nbuf list
 *
 * @u.rx.tcp_udp_chksum: L4 payload checksum
 * @u.rx.tcp_win: TCP window size
 *
 * @u.rx.flow_id: 32bit flow id
 *
 * @u.rx.flag_chfrag_start: first MSDU in an AMSDU
 * @u.rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @u.rx.flag_chfrag_end: last MSDU in an AMSDU
 * @u.rx.flag_retry: flag to indicate MSDU is retried
 * @u.rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
 * @u.rx.flag_da_valid: flag to indicate DA is valid for RX packet
 * @u.rx.flag_sa_valid: flag to indicate SA is valid for RX packet
 * @u.rx.flag_is_frag: flag to indicate skb has frag list
 *
 * @u.rx.trace: combined structure for DP and protocol trace
 * @u.rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                           (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @u.rx.trace.dp_trace: flag (Datapath trace)
 * @u.rx.trace.packet_track: RX_DATA packet
 * @u.rx.trace.rsrvd: reserved bits
 *
 * @u.rx.vdev_id: vdev_id for RX pkt
 * @u.rx.tid_val: tid value
 * @u.rx.ftype: mcast2ucast, TSO, SG, MESH
 *
 * @u.tx: tx data
 * @u.tx.dev: union of priv_cb_w and priv_cb_m
 *
 * @u.tx.dev.priv_cb_w:
 * @u.tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 * @u.tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 *
 * @u.tx.dev.priv_cb_m:
 * @u.tx.dev.priv_cb_m.ipa: IPA-specific data
 * @u.tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
 * @u.tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
 * @u.tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
 *                 + (1) CE classification enablement bit
 *                 + (2) packet type (802.3 or Ethernet type II)
 *                 + (3) packet offset (usually length of HTC/HTT descr)
 * @u.tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
 * @u.tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
 * @u.tx.dev.priv_cb_m.dma_option: DMA options
 * @u.tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-directional dma map
 * @u.tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future use
 * @u.tx.dev.priv_cb_m.flag_notify_comp: reserved
 * @u.tx.dev.priv_cb_m.band: band for TX packet
 * @u.tx.dev.priv_cb_m.flag_ts_valid: flag to indicate that the field
 *                                    u.tx.pa_ts.ts_value is valid; it must be
 *                                    cleared before fragment mapping
 * @u.tx.dev.priv_cb_m.rsvd: reserved
 * @u.tx.dev.priv_cb_m.reserved: reserved
 *
161 * @u.tx.flags: union of flag representations
162 * @u.tx.flags.bits: flags represent as individual bitmasks
163 * @u.tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
164 * @u.tx.flags.bits.num: number of extra frags ( 0 or 1)
165 * @u.tx.flags.bits.nbuf: flag, nbuf payload to be swapped (wordstream)
166 * @u.tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
167 * @u.tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
168 * @u.tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
169 * @u.tx.flags.bits.flag_ext_header: extended flags
170 * @u.tx.flags.bits.is_critical: flag indicating a critical frame
171 * @u.tx.flags.u8: flags as a single u8
172 * @u.tx.trace: combined structure for DP and protocol trace
173 * @u.tx.trace.packet_stat: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
174 * + (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
175 * @u.tx.trace.is_packet_priv:
176 * @u.tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
177 * @u.tx.trace.to_fw: Flag to indicate send this packet to FW
178 * @u.tx.trace.htt2_frm: flag (high-latency path only)
179 * @u.tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
180 * + (MGMT_ACTION)] - 4 bits
181 * @u.tx.trace.dp_trace: flag (Datapath trace)
182 * @u.tx.trace.is_bcast: flag (Broadcast packet)
183 * @u.tx.trace.is_mcast: flag (Multicast packet)
184 * @u.tx.trace.packet_type: flag (Packet type)
185 * @u.tx.trace.print: enable packet logging
186 *
187 * @u.tx.vaddr: virtual address of ~
188 * @u.tx.pa_ts.paddr: physical/DMA address of ~
189 * @u.tx.pa_ts.ts_value: driver ingress timestamp, it must be cleared before
190 * fragment mapping
191 */
struct qdf_nbuf_cb {
	/* common */
	qdf_paddr_t paddr; /* of skb->data */
	/* valid only in one direction */
	union {
		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
					uint16_t msdu_len : 14,
						 flag_intra_bss : 1,
						 ipa_smmu_map : 1;
					uint16_t peer_id;
					uint16_t protocol_tag;
					uint16_t flow_tag;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					uint32_t ipa_owned:1,
						 peer_cached_buf_frm:1,
						 flush_ind:1,
						 packet_buf_pool:1,
						 l3_hdr_pad:3,
						 /* exception frame flag */
						 exc_frm:1,
						 ipa_smmu_map:1,
						 reo_dest_ind_or_sw_excpt:5,
						 lmac_id:2,
						 fr_ds:1,
						 to_ds:1,
						 logical_link_id:4,
						 band:3,
						 reserved1:7;
					union {
						struct {
							uint32_t tcp_seq_num;
							uint32_t tcp_ack_num;
						} tcp;
						struct {
							uint32_t mpdu_seq:12,
								 reserved:20;
							uint32_t reserved1;
						} ext;
					} dp_ext;
					union {
						struct {
							uint16_t msdu_len;
							uint16_t peer_id;
						} wifi3;
						struct {
							uint32_t map_index;
						} wifi2;
					} dp;
					unsigned char *lro_ctx;
				} priv_cb_m;
			} dev;
			uint32_t lro_eligible:1,
				 tcp_proto:1,
				 tcp_pure_ack:1,
				 ipv6_proto:1,
				 ip_offset:7,
				 tcp_offset:7,
				 rx_ctx_id:4,
				 fcs_err:1,
				 is_raw_frame:1,
				 num_elements_in_list:8;
			uint32_t tcp_udp_chksum:16,
				 tcp_win:16;
			uint32_t flow_id;
			uint8_t flag_chfrag_start:1,
				flag_chfrag_cont:1,
				flag_chfrag_end:1,
				flag_retry:1,
				flag_da_mcbc:1,
				flag_da_valid:1,
				flag_sa_valid:1,
				flag_is_frag:1;
			union {
				uint8_t packet_state;
				uint8_t dp_trace:1,
					packet_track:3,
					rsrvd:4;
			} trace;
			uint16_t vdev_id:8,
				 tid_val:4,
				 ftype:4;
		} rx;

		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					struct {
						uint32_t owned:1,
							 priv:31;
					} ipa;
					uint32_t data_attr;
					uint16_t desc_id;
					uint16_t mgmt_desc_id;
					struct {
						uint8_t bi_map:1,
							reserved:7;
					} dma_option;
					uint8_t flag_notify_comp:1,
						band:3,
						flag_ts_valid:1,
						rsvd:3;
					uint8_t reserved[2];
				} priv_cb_m;
			} dev;
			uint8_t ftype;
			uint8_t vdev_id;
			uint16_t len;
			union {
				struct {
					uint8_t flag_efrag:1,
						flag_nbuf:1,
						num:1,
						flag_chfrag_start:1,
						flag_chfrag_cont:1,
						flag_chfrag_end:1,
						flag_ext_header:1,
						is_critical:1;
				} bits;
				uint8_t u8;
			} flags;
			struct {
				uint8_t packet_state:7,
					is_packet_priv:1;
				uint8_t packet_track:3,
					to_fw:1,
					htt2_frm:1,
					proto_type:3;
				uint8_t dp_trace:1,
					is_bcast:1,
					is_mcast:1,
					packet_type:4,
					print:1;
			} trace;
			unsigned char *vaddr;
			union {
				qdf_paddr_t paddr;
				qdf_ktime_t ts_value;
			} pa_ts;
		} tx;
	} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
			(sizeof(struct qdf_nbuf_cb)) <=
			sizeof_field(struct sk_buff, cb));
#else
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
			(sizeof(struct qdf_nbuf_cb)) <=
			FIELD_SIZEOF(struct sk_buff, cb));
#endif

/*
 * access macros to qdf_nbuf_cb
 * Note: These macros can be used as L-values as well as R-values.
 * When used as R-values, they effectively function as "get" macros
 * When used as L-values, they effectively function as "set" macros
 */

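/*
 * Usage sketch (illustrative only, not part of the API): because each
 * accessor below expands to a plain struct-member reference, the same
 * macro works on either side of an assignment. The helper name here is
 * hypothetical:
 *
 *	static inline void example_copy_rx_ctx(struct sk_buff *dst,
 *					       struct sk_buff *src)
 *	{
 *		uint8_t ctx = QDF_NBUF_CB_RX_CTX_ID(src);	// "get"
 *
 *		QDF_NBUF_CB_RX_CTX_ID(dst) = ctx;		// "set"
 *	}
 */
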
#define QDF_NBUF_CB_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)

#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
#define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)

#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)

#define QDF_NBUF_CB_RX_FLOW_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)

#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)

#define QDF_NBUF_CB_RX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)

#define QDF_NBUF_CB_RX_VDEV_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id)

#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_end)

#define QDF_NBUF_CB_RX_DA_MCBC(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_mcbc)

#define QDF_NBUF_CB_RX_DA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_valid)

#define QDF_NBUF_CB_RX_SA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_sa_valid)

#define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_retry)

#define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.is_raw_frame)

#define QDF_NBUF_CB_RX_FROM_DS(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.dev.priv_cb_m.fr_ds)

#define QDF_NBUF_CB_RX_TO_DS(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.dev.priv_cb_m.to_ds)

#define QDF_NBUF_CB_RX_TID_VAL(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.tid_val)

#define QDF_NBUF_CB_RX_IS_FRAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_is_frag)

#define QDF_NBUF_CB_RX_FCS_ERR(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.fcs_err)

#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
	qdf_nbuf_set_state(skb, PACKET_STATE)

#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)

#define QDF_NBUF_CB_TX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)

#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)

/* Tx Flags Accessor Macros */
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)

#define QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.is_critical)
/* End of Tx Flags Accessor Macros */

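/*
 * Usage sketch (illustrative only; the helper name is hypothetical):
 * the chfrag bits let a caller recognize MSDU boundaries when several
 * nbufs together carry one AMSDU:
 *
 *	static inline bool example_is_amsdu_edge(struct sk_buff *skb)
 *	{
 *		return QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) ||
 *		       QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb);
 *	}
 */
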
/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.trace.packet_state)

#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.trace.is_packet_priv)

#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.trace.packet_track)

#define QDF_NBUF_CB_TX_PACKET_TO_FW(skb)\
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.trace.to_fw)

#define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.trace.packet_track)

#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.trace.proto_type)

#define QDF_NBUF_CB_TX_DP_TRACE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)

#define QDF_NBUF_CB_DP_TRACE_PRINT(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)

#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)

#define QDF_NBUF_CB_GET_IS_BCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)

#define QDF_NBUF_CB_GET_IS_MCAST(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)

#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)

#define QDF_NBUF_CB_SET_BCAST(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.trace.is_bcast = true)

#define QDF_NBUF_CB_SET_MCAST(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */
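
/*
 * Usage sketch (illustrative only): a classification path would
 * typically set the destination trace bits once at ingress and read
 * them back from the completion path, e.g.:
 *
 *	if (is_broadcast_ether_addr(eh->h_dest))
 *		QDF_NBUF_CB_SET_BCAST(skb);
 *	else if (is_multicast_ether_addr(eh->h_dest))
 *		QDF_NBUF_CB_SET_MCAST(skb);
 *	...
 *	if (QDF_NBUF_CB_GET_IS_BCAST(skb))
 *		...
 *
 * is_broadcast_ether_addr()/is_multicast_ether_addr() are the standard
 * etherdevice.h helpers; "eh" is a hypothetical struct ethhdr pointer.
 */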

#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.pa_ts.paddr.dma_addr)

#define QDF_NBUF_CB_TX_TS_VALID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.flag_ts_valid)
#define QDF_NBUF_CB_TX_TS_VALUE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.pa_ts.ts_value)

/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb) \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)

#define __qdf_nbuf_reset_num_frags(skb) \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)
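
/*
 * Worked example: with no extra fragment attached,
 * QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS() is 0, so __qdf_nbuf_get_num_frags()
 * reports 1 (the skb itself); once the single HTC/HTT header efrag is
 * attached it reports 2. A hypothetical reset-then-query sequence:
 *
 *	__qdf_nbuf_reset_num_frags(skb);
 *	nr = __qdf_nbuf_get_num_frags(skb);	// nr == 1
 */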

#define QDF_NBUF_CB_RX_TCP_SEQ_NUM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
	dp_ext.tcp.tcp_seq_num)
#define QDF_NBUF_CB_RX_TCP_ACK_NUM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
	dp_ext.tcp.tcp_ack_num)
#define QDF_NBUF_CB_RX_MPDU_SEQ_NUM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
	dp_ext.ext.mpdu_seq)

#define QDF_NBUF_CB_RX_LRO_CTX(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.lro_ctx)

#define QDF_NBUF_CB_TX_IPA_OWNED(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.ipa.owned)
#define QDF_NBUF_CB_TX_IPA_PRIV(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.ipa.priv)
#define QDF_NBUF_CB_TX_DESC_ID(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.desc_id)
#define QDF_NBUF_CB_MGMT_TXRX_DESC_ID(skb)\
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.mgmt_desc_id)
#define QDF_NBUF_CB_TX_DMA_BI_MAP(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m. \
	dma_option.bi_map)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m. \
	flag_notify_comp)

#define QDF_NBUF_CB_TX_BAND(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m. \
	band)

#define QDF_NBUF_CB_RX_PEER_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.dp. \
	wifi3.peer_id)

#define QDF_NBUF_CB_RX_PKT_LEN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.dp. \
	wifi3.msdu_len)

#define QDF_NBUF_CB_RX_MAP_IDX(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.dp. \
	wifi2.map_index)

#define QDF_NBUF_CB_RX_PEER_CACHED_FRM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
	peer_cached_buf_frm)

#define QDF_NBUF_CB_RX_FLUSH_IND(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.flush_ind)

#define QDF_NBUF_CB_RX_PACKET_BUFF_POOL(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
	packet_buf_pool)

#define QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
	l3_hdr_pad)

#define QDF_NBUF_CB_RX_PACKET_EXC_FRAME(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
	exc_frm)

#define QDF_NBUF_CB_RX_PACKET_IPA_SMMU_MAP(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
	ipa_smmu_map)

#define QDF_NBUF_CB_RX_PACKET_REO_DEST_IND_OR_SW_EXCPT(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
	reo_dest_ind_or_sw_excpt)

#define QDF_NBUF_CB_RX_PACKET_LMAC_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
	lmac_id)

#define QDF_NBUF_CB_RX_LOGICAL_LINK_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
	logical_link_id)

#define QDF_NBUF_CB_RX_BAND(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
	band)

#define __qdf_nbuf_ipa_owned_get(skb) \
	QDF_NBUF_CB_TX_IPA_OWNED(skb)

#define __qdf_nbuf_ipa_owned_set(skb) \
	(QDF_NBUF_CB_TX_IPA_OWNED(skb) = 1)

#define __qdf_nbuf_ipa_owned_clear(skb) \
	(QDF_NBUF_CB_TX_IPA_OWNED(skb) = 0)

#define __qdf_nbuf_ipa_priv_get(skb) \
	QDF_NBUF_CB_TX_IPA_PRIV(skb)

#define __qdf_nbuf_ipa_priv_set(skb, priv) \
	(QDF_NBUF_CB_TX_IPA_PRIV(skb) = (priv))

#define __qdf_nbuf_data_attr_get(skb) \
	QDF_NBUF_CB_TX_DATA_ATTR(skb)
#define __qdf_nbuf_data_attr_set(skb, data_attr) \
	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))

#define __qdf_nbuf_set_tx_ts(skb, ts) \
	do { \
		QDF_NBUF_CB_TX_TS_VALUE(skb) = (ts); \
		QDF_NBUF_CB_TX_TS_VALID(skb) = 1; \
	} while (0)

#define __qdf_nbuf_clear_tx_ts(skb) \
	do { \
		QDF_NBUF_CB_TX_TS_VALUE(skb) = 0; \
		QDF_NBUF_CB_TX_TS_VALID(skb) = 0; \
	} while (0)

#define __qdf_nbuf_get_tx_ts(skb) \
	(QDF_NBUF_CB_TX_TS_VALID(skb) ? \
	 QDF_NBUF_CB_TX_TS_VALUE(skb) : 0)
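
/*
 * Usage sketch (illustrative only): u.tx.pa_ts is a union, so the
 * ingress timestamp shares storage with the extra-fragment physical
 * address. A hypothetical flow (assuming qdf_ktime_get() from
 * qdf_time.h) stamps at ingress and must clear the timestamp before
 * any extra fragment is DMA-mapped:
 *
 *	__qdf_nbuf_set_tx_ts(skb, qdf_ktime_get());	// driver ingress
 *	ts = __qdf_nbuf_get_tx_ts(skb);			// 0 when not valid
 *	__qdf_nbuf_clear_tx_ts(skb);	// before mapping the extra frag
 */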

/**
 * __qdf_nbuf_map_nbytes_single() - map nbytes
 * @osdev: os device
 * @buf: buffer
 * @dir: direction
 * @nbytes: number of bytes
 *
 * Return: QDF_STATUS
 */
#ifdef A_SIMOS_DEVHOST
static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
		qdf_device_t osdev, struct sk_buff *buf,
		qdf_dma_dir_t dir, int nbytes)
{
	qdf_dma_addr_t paddr;

	QDF_NBUF_CB_PADDR(buf) = paddr = buf->data;
	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
		qdf_device_t osdev, struct sk_buff *buf,
		qdf_dma_dir_t dir, int nbytes)
{
	qdf_dma_addr_t paddr;
	QDF_STATUS ret;

	/* assume that the OS only provides a single fragment */
	QDF_NBUF_CB_PADDR(buf) = paddr =
		dma_map_single(osdev->dev, buf->data,
			       nbytes, __qdf_dma_dir_to_os(dir));
	ret = dma_mapping_error(osdev->dev, paddr) ?
		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
	if (QDF_IS_STATUS_SUCCESS(ret))
		__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf),
					 dir, true);
	return ret;
}
#endif
/**
 * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
 * @osdev: os device
 * @buf: buffer
 * @dir: direction
 * @nbytes: number of bytes
 *
 * Return: none
 */
#if defined(A_SIMOS_DEVHOST)
static inline void
__qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
			       qdf_dma_dir_t dir, int nbytes)
{
}

#else
static inline void
__qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
			       qdf_dma_dir_t dir, int nbytes)
{
	qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(buf);

	if (qdf_likely(paddr)) {
		__qdf_record_nbuf_nbytes(
			__qdf_nbuf_get_end_offset(buf), dir, false);
		dma_unmap_single(osdev->dev, paddr, nbytes,
				 __qdf_dma_dir_to_os(dir));
		return;
	}
}
#endif
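
/*
 * Usage sketch (illustrative, hypothetical caller): every successful
 * map must be balanced by an unmap with the same direction and length,
 * since the unmap reads back the DMA address cached by the map in
 * QDF_NBUF_CB_PADDR():
 *
 *	if (__qdf_nbuf_map_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *					 skb->len) != QDF_STATUS_SUCCESS)
 *		return;		// mapping failed; paddr is not valid
 *	// ... hand QDF_NBUF_CB_PADDR(skb) to the hardware ...
 *	__qdf_nbuf_unmap_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *				       skb->len);
 */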

/**
 * __qdf_nbuf_reset() - reset the buffer data and pointer
 * @skb: Network buf instance
 * @reserve: reserve
 * @align: align
 *
 * Return: none
 */
static inline void
__qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
{
	int offset;

	skb_push(skb, skb_headroom(skb));
	skb_put(skb, skb_tailroom(skb));
	memset(skb->data, 0x0, skb->len);
	skb_trim(skb, 0);
	skb_reserve(skb, NET_SKB_PAD);
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * Align & make sure that the tail & data are adjusted properly
	 */

	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	skb_reserve(skb, reserve);
}
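
/*
 * Usage sketch (illustrative only): a hypothetical recycle path reuses
 * a buffer by wiping its data and cb, then restoring the headroom
 * layout the caller expects (here 64 bytes of reserve, 4-byte aligned):
 *
 *	__qdf_nbuf_reset(skb, 64, 4);
 *	// now skb->len == 0, cb is zeroed, skb->data is 4-byte aligned
 */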

/**
 * qdf_nbuf_cb_update_vdev_id() - update vdev id in skb cb
 * @skb: skb pointer whose cb is updated with vdev id information
 * @vdev_id: vdev id to be updated in cb
 *
 * Return: void
 */
static inline void
qdf_nbuf_cb_update_vdev_id(struct sk_buff *skb, uint8_t vdev_id)
{
	QDF_NBUF_CB_RX_VDEV_ID(skb) = vdev_id;
}

/**
 * __qdf_nbuf_init_replenish_timer() - Initialize the alloc replenish timer
 *
 * This function initializes the nbuf alloc fail replenish timer.
 *
 * Return: void
 */
void __qdf_nbuf_init_replenish_timer(void);

/**
 * __qdf_nbuf_deinit_replenish_timer() - Deinitialize the alloc replenish timer
 *
 * This function deinitializes the nbuf alloc fail replenish timer.
 *
 * Return: void
 */
void __qdf_nbuf_deinit_replenish_timer(void);

/**
 * __qdf_nbuf_len() - return the amount of valid data in the skb
 * @skb: Pointer to network buffer
 *
 * This API returns the amount of valid data in the skb; if there are
 * extra frags, it returns the total length including them.
 *
 * Return: network buffer length
 */
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
	int i, extra_frag_len = 0;

	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
	if (i > 0)
		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);

	return extra_frag_len + skb->len;
}
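
/*
 * Worked example: for an skb with skb->len == 1400 that carries one
 * extra fragment of QDF_NBUF_CB_TX_EXTRA_FRAG_LEN() == 24 (e.g. an
 * HTC/HTT header), __qdf_nbuf_len() returns 1400 + 24 == 1424.
 */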

/**
 * __qdf_nbuf_num_frags_init() - init extra frags
 * @skb: sk buffer
 *
 * Return: none
 */
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}

/**
 * __qdf_nbuf_push_head() - Push data in the front
 * @skb: Pointer to network buffer
 * @size: size to be pushed
 *
 * Return: New data pointer of this buf after data has been pushed,
 *         or NULL if there is not enough room in this buf.
 */
static inline uint8_t *__qdf_nbuf_push_head(struct sk_buff *skb, size_t size)
{
	if (QDF_NBUF_CB_PADDR(skb))
		QDF_NBUF_CB_PADDR(skb) -= size;

	return skb_push(skb, size);
}

/**
 * __qdf_nbuf_pull_head() - pull data out from the front
 * @skb: Pointer to network buffer
 * @size: size to be popped
 *
 * Return: New data pointer of this buf after data has been popped,
 *         or NULL if there is not sufficient data to pull.
 */
static inline uint8_t *__qdf_nbuf_pull_head(struct sk_buff *skb, size_t size)
{
	if (QDF_NBUF_CB_PADDR(skb))
		QDF_NBUF_CB_PADDR(skb) += size;

	return skb_pull(skb, size);
}
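
/*
 * Note (illustrative sketch): both helpers keep the cached DMA address
 * in step with skb->data, so a hypothetical header strip/restore on a
 * mapped buffer stays consistent:
 *
 *	__qdf_nbuf_pull_head(skb, hdr_len);	// paddr advances by hdr_len
 *	// ... process payload ...
 *	__qdf_nbuf_push_head(skb, hdr_len);	// paddr moves back
 */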

/**
 * qdf_nbuf_is_intra_bss() - get intra bss bit
 * @buf: Network buffer
 *
 * Return: integer value - 0/1
 */
static inline int qdf_nbuf_is_intra_bss(struct sk_buff *buf)
{
	return 0;
}

/**
 * qdf_nbuf_set_intra_bss() - set intra bss bit
 * @buf: Network buffer
 * @val: 0/1
 *
 * Return: void
 */
static inline void qdf_nbuf_set_intra_bss(struct sk_buff *buf, uint8_t val)
{
}

/**
 * qdf_nbuf_init_replenish_timer() - Initialize the alloc replenish timer
 *
 * This function initializes the nbuf alloc fail replenish timer.
 *
 * Return: void
 */
static inline void
qdf_nbuf_init_replenish_timer(void)
{
	__qdf_nbuf_init_replenish_timer();
}

/**
 * qdf_nbuf_deinit_replenish_timer() - Deinitialize the alloc replenish timer
 *
 * This function deinitializes the nbuf alloc fail replenish timer.
 *
 * Return: void
 */
static inline void
qdf_nbuf_deinit_replenish_timer(void)
{
	__qdf_nbuf_deinit_replenish_timer();
}

static inline void
__qdf_nbuf_dma_inv_range(const void *buf_start, const void *buf_end) {}

static inline void
__qdf_nbuf_dma_inv_range_no_dsb(const void *buf_start, const void *buf_end) {}

static inline void
__qdf_nbuf_dma_clean_range_no_dsb(const void *buf_start, const void *buf_end) {}

static inline void
__qdf_dsb(void) {}

static inline void
__qdf_nbuf_dma_clean_range(const void *buf_start, const void *buf_end) {}

#endif /*_I_QDF_NBUF_M_H */