xref: /wlan-driver/qcacld-3.0/core/dp/htt/htt_internal.h (revision 5113495b16420b49004c444715d2daae2066e7dc)
1 /*
2  * Copyright (c) 2011, 2014-2020 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _HTT_INTERNAL__H_
21 #define _HTT_INTERNAL__H_
22 
23 #include <athdefs.h>            /* A_STATUS */
24 #include <qdf_nbuf.h>           /* qdf_nbuf_t */
25 #include <qdf_util.h>           /* qdf_assert */
26 #include <htc_api.h>            /* HTC_PACKET */
27 
28 #include <htt_types.h>
29 
30 /* htt_rx.c */
31 #define RX_MSDU_END_4_FIRST_MSDU_MASK \
32 	(pdev->targetdef->d_RX_MSDU_END_4_FIRST_MSDU_MASK)
33 #define RX_MSDU_END_4_FIRST_MSDU_LSB \
34 	(pdev->targetdef->d_RX_MSDU_END_4_FIRST_MSDU_LSB)
35 #define RX_MPDU_START_0_RETRY_LSB  \
36 	(pdev->targetdef->d_RX_MPDU_START_0_RETRY_LSB)
37 #define RX_MPDU_START_0_RETRY_MASK  \
38 	(pdev->targetdef->d_RX_MPDU_START_0_RETRY_MASK)
39 #define RX_MPDU_START_0_SEQ_NUM_MASK \
40 	(pdev->targetdef->d_RX_MPDU_START_0_SEQ_NUM_MASK)
41 #define RX_MPDU_START_0_SEQ_NUM_LSB \
42 	(pdev->targetdef->d_RX_MPDU_START_0_SEQ_NUM_LSB)
43 #define RX_MPDU_START_2_PN_47_32_LSB \
44 	(pdev->targetdef->d_RX_MPDU_START_2_PN_47_32_LSB)
45 #define RX_MPDU_START_2_PN_47_32_MASK \
46 	(pdev->targetdef->d_RX_MPDU_START_2_PN_47_32_MASK)
47 #define RX_MPDU_START_2_TID_LSB  \
48 	(pdev->targetdef->d_RX_MPDU_START_2_TID_LSB)
49 #define RX_MPDU_START_2_TID_MASK  \
50 	(pdev->targetdef->d_RX_MPDU_START_2_TID_MASK)
51 #define RX_MSDU_END_1_KEY_ID_OCT_MASK \
52 	(pdev->targetdef->d_RX_MSDU_END_1_KEY_ID_OCT_MASK)
53 #define RX_MSDU_END_1_KEY_ID_OCT_LSB \
54 	(pdev->targetdef->d_RX_MSDU_END_1_KEY_ID_OCT_LSB)
55 #define RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK \
56 	(pdev->targetdef->d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK)
57 #define RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB \
58 	(pdev->targetdef->d_RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB)
59 #define RX_MSDU_END_4_LAST_MSDU_MASK \
60 	(pdev->targetdef->d_RX_MSDU_END_4_LAST_MSDU_MASK)
61 #define RX_MSDU_END_4_LAST_MSDU_LSB \
62 	(pdev->targetdef->d_RX_MSDU_END_4_LAST_MSDU_LSB)
63 #define RX_ATTENTION_0_MCAST_BCAST_MASK \
64 	(pdev->targetdef->d_RX_ATTENTION_0_MCAST_BCAST_MASK)
65 #define RX_ATTENTION_0_MCAST_BCAST_LSB \
66 	(pdev->targetdef->d_RX_ATTENTION_0_MCAST_BCAST_LSB)
67 #define RX_ATTENTION_0_FRAGMENT_MASK \
68 	(pdev->targetdef->d_RX_ATTENTION_0_FRAGMENT_MASK)
69 #define RX_ATTENTION_0_FRAGMENT_LSB \
70 	(pdev->targetdef->d_RX_ATTENTION_0_FRAGMENT_LSB)
71 #define RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK \
72 	(pdev->targetdef->d_RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK)
73 #define RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK \
74 	(pdev->targetdef->d_RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK)
75 #define RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB \
76 	(pdev->targetdef->d_RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB)
77 #define RX_MSDU_START_0_MSDU_LENGTH_MASK \
78 	(pdev->targetdef->d_RX_MSDU_START_0_MSDU_LENGTH_MASK)
79 #define RX_MSDU_START_0_MSDU_LENGTH_LSB \
80 	(pdev->targetdef->d_RX_MSDU_START_0_MSDU_LENGTH_LSB)
81 #define RX_MPDU_START_0_ENCRYPTED_MASK \
82 	(pdev->targetdef->d_RX_MPDU_START_0_ENCRYPTED_MASK)
83 #define RX_MPDU_START_0_ENCRYPTED_LSB \
84 	(pdev->targetdef->d_RX_MPDU_START_0_ENCRYPTED_LSB)
85 #define RX_ATTENTION_0_MORE_DATA_MASK \
86 	(pdev->targetdef->d_RX_ATTENTION_0_MORE_DATA_MASK)
87 #define RX_ATTENTION_0_MSDU_DONE_MASK \
88 	(pdev->targetdef->d_RX_ATTENTION_0_MSDU_DONE_MASK)
89 #define RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK \
90 	(pdev->targetdef->d_RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK)
91 #define RX_MSDU_START_2_DECAP_FORMAT_OFFSET \
92 	(pdev->targetdef->d_RX_MSDU_START_2_DECAP_FORMAT_OFFSET)
93 #define RX_MSDU_START_2_DECAP_FORMAT_LSB \
94 	(pdev->targetdef->d_RX_MSDU_START_2_DECAP_FORMAT_LSB)
95 #define RX_MSDU_START_2_DECAP_FORMAT_MASK \
96 	(pdev->targetdef->d_RX_MSDU_START_2_DECAP_FORMAT_MASK)
97 /* end */
98 
99 #ifndef offsetof
100 #define offsetof(type, field)   ((size_t)(&((type *)0)->field))
101 #endif
102 
103 #undef MS
104 #define MS(_v, _f) (((_v) & _f ## _MASK) >> _f ## _LSB)
105 #undef SM
106 #define SM(_v, _f) (((_v) << _f ## _LSB) & _f ## _MASK)
107 #undef WO
108 #define WO(_f)      ((_f ## _OFFSET) >> 2)
109 
110 #define GET_FIELD(_addr, _f) MS(*((A_UINT32 *)(_addr) + WO(_f)), _f)
111 
112 #include <rx_desc.h>
113 #include <wal_rx_desc.h>        /* struct rx_attention, etc */
114 
/* FW rx descriptor, padded so the union is DWORD (4-byte) aligned */
struct htt_host_fw_desc_base {
	union {
		struct fw_rx_desc_base val;
		A_UINT32 dummy_pad;     /* make sure it is DWORD aligned */
	} u;
};
121 
122 
123 /*
124  * This struct defines the basic descriptor information used by host,
125  * which is written either by the 11ac HW MAC into the host Rx data
126  * buffer ring directly or generated by FW and copied from Rx indication
127  */
struct htt_host_rx_desc_base {
	struct htt_host_fw_desc_base fw_desc;
	struct rx_attention attention;
	struct rx_frag_info frag_info;
	struct rx_mpdu_start mpdu_start;
	struct rx_msdu_start msdu_start;
	struct rx_msdu_end msdu_end;
	struct rx_mpdu_end mpdu_end;
	struct rx_ppdu_start ppdu_start;
	struct rx_ppdu_end ppdu_end;
#ifdef QCA_WIFI_3_0_ADRASTEA
/* Increased to support some of offload features */
#define RX_HTT_HDR_STATUS_LEN 256
#else
#define RX_HTT_HDR_STATUS_LEN 64
#endif
	/* rx header status bytes; length depends on the target family */
	char rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
};
146 
147 #define RX_DESC_ATTN_MPDU_LEN_ERR_BIT   0x08000000
148 
149 #define RX_STD_DESC_ATTN_OFFSET	\
150 	(offsetof(struct htt_host_rx_desc_base, attention))
151 #define RX_STD_DESC_FRAG_INFO_OFFSET \
152 	(offsetof(struct htt_host_rx_desc_base, frag_info))
153 #define RX_STD_DESC_MPDU_START_OFFSET \
154 	(offsetof(struct htt_host_rx_desc_base, mpdu_start))
155 #define RX_STD_DESC_MSDU_START_OFFSET \
156 	(offsetof(struct htt_host_rx_desc_base, msdu_start))
157 #define RX_STD_DESC_MSDU_END_OFFSET \
158 	(offsetof(struct htt_host_rx_desc_base, msdu_end))
159 #define RX_STD_DESC_MPDU_END_OFFSET \
160 	(offsetof(struct htt_host_rx_desc_base, mpdu_end))
161 #define RX_STD_DESC_PPDU_START_OFFSET \
162 	(offsetof(struct htt_host_rx_desc_base, ppdu_start))
163 #define RX_STD_DESC_PPDU_END_OFFSET \
164 	(offsetof(struct htt_host_rx_desc_base, ppdu_end))
165 #define RX_STD_DESC_HDR_STATUS_OFFSET \
166 	(offsetof(struct htt_host_rx_desc_base, rx_hdr_status))
167 
168 #define RX_STD_DESC_FW_MSDU_OFFSET \
169 	(offsetof(struct htt_host_rx_desc_base, fw_desc))
170 
171 #define RX_STD_DESC_SIZE (sizeof(struct htt_host_rx_desc_base))
172 
173 #define RX_DESC_ATTN_OFFSET32       (RX_STD_DESC_ATTN_OFFSET >> 2)
174 #define RX_DESC_FRAG_INFO_OFFSET32  (RX_STD_DESC_FRAG_INFO_OFFSET >> 2)
175 #define RX_DESC_MPDU_START_OFFSET32 (RX_STD_DESC_MPDU_START_OFFSET >> 2)
176 #define RX_DESC_MSDU_START_OFFSET32 (RX_STD_DESC_MSDU_START_OFFSET >> 2)
177 #define RX_DESC_MSDU_END_OFFSET32   (RX_STD_DESC_MSDU_END_OFFSET >> 2)
178 #define RX_DESC_MPDU_END_OFFSET32   (RX_STD_DESC_MPDU_END_OFFSET >> 2)
179 #define RX_DESC_PPDU_START_OFFSET32 (RX_STD_DESC_PPDU_START_OFFSET >> 2)
180 #define RX_DESC_PPDU_END_OFFSET32   (RX_STD_DESC_PPDU_END_OFFSET >> 2)
181 #define RX_DESC_HDR_STATUS_OFFSET32 (RX_STD_DESC_HDR_STATUS_OFFSET >> 2)
182 
183 #define RX_STD_DESC_SIZE_DWORD      (RX_STD_DESC_SIZE >> 2)
184 
185 /*
186  * Make sure there is a minimum headroom provided in the rx netbufs
187  * for use by the OS shim and OS and rx data consumers.
188  */
189 #define HTT_RX_BUF_OS_MIN_HEADROOM 32
190 #define HTT_RX_STD_DESC_RESERVATION  \
191 	((HTT_RX_BUF_OS_MIN_HEADROOM > RX_STD_DESC_SIZE) ? \
192 	 HTT_RX_BUF_OS_MIN_HEADROOM : RX_STD_DESC_SIZE)
193 #define HTT_RX_DESC_RESERVATION32 \
194 	(HTT_RX_STD_DESC_RESERVATION >> 2)
195 
196 #define HTT_RX_DESC_ALIGN_MASK 7        /* 8-byte alignment */
197 
198 #ifdef DEBUG_RX_RING_BUFFER
199 #ifdef MSM_PLATFORM
200 #define HTT_ADDRESS_MASK   0xfffffffffffffffe
201 #else
202 #define HTT_ADDRESS_MASK   0xfffffffe
203 #endif /* MSM_PLATFORM */
204 
205 /**
206  * rx_buf_debug: rx_ring history
207  *
208  * There are three types of entries in history:
209  * 1) rx-descriptors posted (and received)
210  *    Both of these events are stored on the same entry
211  *    @paddr : physical address posted on the ring
212  *    @nbuf  : virtual address of nbuf containing data
213  *    @ndata : virtual address of data (corresponds to physical address)
214  *    @posted: time-stamp when the buffer is posted to the ring
215  *    @recved: time-stamp when the buffer is received (rx_in_order_ind)
216  *           : or 0, if the buffer has not been received yet
217  * 2) ring alloc-index (fill-index) updates
218  *    @paddr : = 0
219  *    @nbuf  : = 0
220  *    @ndata : = 0
221  *    posted : time-stamp when alloc index was updated
222  *    recved : value of alloc index
223  * 3) htt_rx_in_order_indication reception
224  *    @paddr : = 0
225  *    @nbuf  : = 0
226  *    @ndata : msdu_cnt
227  *    @posted: time-stamp when HTT message is received
 228  *    @recved: 0x48545452584D5367 ('HTTRXMSG')
229  */
230 #ifdef CONFIG_SLUB_DEBUG_ON
231 #define HTT_RX_RING_BUFF_DBG_LIST          (8 * 1024)
232 #else
233 #define HTT_RX_RING_BUFF_DBG_LIST          (4 * 1024)
234 #endif
struct rx_buf_debug {
	qdf_dma_addr_t paddr;     /* physical address posted on the ring */
	qdf_nbuf_t     nbuf;      /* virtual address of the nbuf */
	void          *nbuf_data; /* virtual address of the nbuf's data */
	uint64_t       posted; /* timestamp */
	uint64_t       recved; /* timestamp */
	/* bitmask of CPUs that touched this entry (1 << qdf_get_cpu()) */
	int            cpu;

};
244 #endif
245 
/**
 * htt_rx_desc() - get the HTT rx descriptor stored in an rx netbuf
 * @msdu: rx network buffer
 *
 * The descriptor sits at the start of the netbuf's head room, rounded
 * up to the next 8-byte boundary.
 *
 * Return: pointer to the buffer's htt_host_rx_desc_base
 */
static inline struct htt_host_rx_desc_base *htt_rx_desc(qdf_nbuf_t msdu)
{
	size_t addr = (size_t)qdf_nbuf_head(msdu);

	/* round up to the 8-byte alignment required by the descriptor */
	addr += HTT_RX_DESC_ALIGN_MASK;
	addr &= ~((size_t)HTT_RX_DESC_ALIGN_MASK);

	return (struct htt_host_rx_desc_base *)addr;
}
252 
253 #if defined(HELIUMPLUS)
254 /**
255  * htt_print_rx_desc_lro() - print LRO information in the rx
256  * descriptor
257  * @rx_desc: HTT rx descriptor
258  *
259  * Prints the LRO related fields in the HTT rx descriptor
260  *
261  * Return: none
262  */
htt_print_rx_desc_lro(struct htt_host_rx_desc_base * rx_desc)263 static inline void htt_print_rx_desc_lro(struct htt_host_rx_desc_base *rx_desc)
264 {
265 	qdf_nofl_info
266 		("----------------------RX DESC LRO----------------------\n");
267 	qdf_nofl_info("msdu_end.lro_eligible:0x%x\n",
268 		      rx_desc->msdu_end.lro_eligible);
269 	qdf_nofl_info("msdu_start.tcp_only_ack:0x%x\n",
270 		      rx_desc->msdu_start.tcp_only_ack);
271 	qdf_nofl_info("msdu_end.tcp_udp_chksum:0x%x\n",
272 		      rx_desc->msdu_end.tcp_udp_chksum);
273 	qdf_nofl_info("msdu_end.tcp_seq_number:0x%x\n",
274 		      rx_desc->msdu_end.tcp_seq_number);
275 	qdf_nofl_info("msdu_end.tcp_ack_number:0x%x\n",
276 		      rx_desc->msdu_end.tcp_ack_number);
277 	qdf_nofl_info("msdu_start.tcp_proto:0x%x\n",
278 		      rx_desc->msdu_start.tcp_proto);
279 	qdf_nofl_info("msdu_start.ipv6_proto:0x%x\n",
280 		      rx_desc->msdu_start.ipv6_proto);
281 	qdf_nofl_info("msdu_start.ipv4_proto:0x%x\n",
282 		      rx_desc->msdu_start.ipv4_proto);
283 	qdf_nofl_info("msdu_start.l3_offset:0x%x\n",
284 		      rx_desc->msdu_start.l3_offset);
285 	qdf_nofl_info("msdu_start.l4_offset:0x%x\n",
286 		      rx_desc->msdu_start.l4_offset);
287 	qdf_nofl_info("msdu_start.flow_id_toeplitz:0x%x\n",
288 		      rx_desc->msdu_start.flow_id_toeplitz);
289 	qdf_nofl_info
290 		("---------------------------------------------------------\n");
291 }
292 
293 /**
294  * htt_print_rx_desc_lro() - extract LRO information from the rx
295  * descriptor
296  * @msdu: network buffer
297  * @rx_desc: HTT rx descriptor
298  *
299  * Extracts the LRO related fields from the HTT rx descriptor
300  * and stores them in the network buffer's control block
301  *
302  * Return: none
303  */
htt_rx_extract_lro_info(qdf_nbuf_t msdu,struct htt_host_rx_desc_base * rx_desc)304 static inline void htt_rx_extract_lro_info(qdf_nbuf_t msdu,
305 	 struct htt_host_rx_desc_base *rx_desc)
306 {
307 	if (rx_desc->attention.tcp_udp_chksum_fail)
308 		QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = 0;
309 	else
310 		QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
311 			rx_desc->msdu_end.lro_eligible;
312 
313 	if (QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu)) {
314 		QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
315 			rx_desc->msdu_start.tcp_only_ack;
316 		QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
317 			rx_desc->msdu_end.tcp_udp_chksum;
318 		QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
319 			rx_desc->msdu_end.tcp_seq_number;
320 		QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
321 			rx_desc->msdu_end.tcp_ack_number;
322 		QDF_NBUF_CB_RX_TCP_WIN(msdu) =
323 			rx_desc->msdu_end.window_size;
324 		QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
325 			rx_desc->msdu_start.tcp_proto;
326 		QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
327 			rx_desc->msdu_start.ipv6_proto;
328 		QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
329 			rx_desc->msdu_start.l4_offset;
330 		QDF_NBUF_CB_RX_FLOW_ID(msdu) =
331 			rx_desc->msdu_start.flow_id_toeplitz;
332 	}
333 }
334 #else
/* HELIUMPLUS not defined: LRO descriptor dump is a no-op */
static inline void htt_print_rx_desc_lro(struct htt_host_rx_desc_base *rx_desc)
{}
/* HELIUMPLUS not defined: no LRO fields to extract */
static inline void htt_rx_extract_lro_info(qdf_nbuf_t msdu,
	 struct htt_host_rx_desc_base *rx_desc) {}
339 #endif /* HELIUMPLUS */
340 
htt_print_rx_desc(struct htt_host_rx_desc_base * rx_desc)341 static inline void htt_print_rx_desc(struct htt_host_rx_desc_base *rx_desc)
342 {
343 	qdf_nofl_info
344 		("----------------------RX DESC----------------------------\n");
345 	qdf_nofl_info("attention: %#010x\n",
346 		      (unsigned int)(*(uint32_t *)&rx_desc->attention));
347 	qdf_nofl_info("frag_info: %#010x\n",
348 		      (unsigned int)(*(uint32_t *)&rx_desc->frag_info));
349 	qdf_nofl_info("mpdu_start: %#010x %#010x %#010x\n",
350 		      (unsigned int)(((uint32_t *)&rx_desc->mpdu_start)[0]),
351 		      (unsigned int)(((uint32_t *)&rx_desc->mpdu_start)[1]),
352 		      (unsigned int)(((uint32_t *)&rx_desc->mpdu_start)[2]));
353 	qdf_nofl_info("msdu_start: %#010x %#010x %#010x\n",
354 		      (unsigned int)(((uint32_t *)&rx_desc->msdu_start)[0]),
355 		      (unsigned int)(((uint32_t *)&rx_desc->msdu_start)[1]),
356 		      (unsigned int)(((uint32_t *)&rx_desc->msdu_start)[2]));
357 	qdf_nofl_info("msdu_end: %#010x %#010x %#010x %#010x %#010x\n",
358 		      (unsigned int)(((uint32_t *)&rx_desc->msdu_end)[0]),
359 		      (unsigned int)(((uint32_t *)&rx_desc->msdu_end)[1]),
360 		      (unsigned int)(((uint32_t *)&rx_desc->msdu_end)[2]),
361 		      (unsigned int)(((uint32_t *)&rx_desc->msdu_end)[3]),
362 		      (unsigned int)(((uint32_t *)&rx_desc->msdu_end)[4]));
363 	qdf_nofl_info("mpdu_end: %#010x\n",
364 		      (unsigned int)(*(uint32_t *)&rx_desc->mpdu_end));
365 	qdf_nofl_info("ppdu_start: %#010x %#010x %#010x %#010x %#010x\n"
366 		      "%#010x %#010x %#010x %#010x %#010x\n",
367 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_start)[0]),
368 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_start)[1]),
369 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_start)[2]),
370 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_start)[3]),
371 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_start)[4]),
372 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_start)[5]),
373 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_start)[6]),
374 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_start)[7]),
375 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_start)[8]),
376 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_start)[9]));
377 	qdf_nofl_info("ppdu_end: %#010x %#010x %#010x %#010x %#010x\n"
378 		      "%#010x %#010x %#010x %#010x %#010x\n"
379 		      "%#010x,%#010x %#010x %#010x %#010x\n"
380 		      "%#010x %#010x %#010x %#010x %#010x\n" "%#010x %#010x\n",
381 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[0]),
382 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[1]),
383 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[2]),
384 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[3]),
385 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[4]),
386 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[5]),
387 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[6]),
388 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[7]),
389 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[8]),
390 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[9]),
391 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[10]),
392 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[11]),
393 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[12]),
394 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[13]),
395 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[14]),
396 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[15]),
397 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[16]),
398 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[17]),
399 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[18]),
400 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[19]),
401 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[20]),
402 		      (unsigned int)(((uint32_t *)&rx_desc->ppdu_end)[21]));
403 	qdf_nofl_info
404 		("---------------------------------------------------------\n");
405 }
406 
407 #ifndef HTT_ASSERT_LEVEL
408 #define HTT_ASSERT_LEVEL 3
409 #endif
410 
411 #define HTT_ASSERT_ALWAYS(condition) qdf_assert_always((condition))
412 
413 #define HTT_ASSERT0(condition) qdf_assert((condition))
414 #if HTT_ASSERT_LEVEL > 0
415 #define HTT_ASSERT1(condition) qdf_assert((condition))
416 #else
417 #define HTT_ASSERT1(condition)
418 #endif
419 
420 #if HTT_ASSERT_LEVEL > 1
421 #define HTT_ASSERT2(condition) qdf_assert((condition))
422 #else
423 #define HTT_ASSERT2(condition)
424 #endif
425 
426 #if HTT_ASSERT_LEVEL > 2
427 #define HTT_ASSERT3(condition) qdf_assert((condition))
428 #else
429 #define HTT_ASSERT3(condition)
430 #endif
431 
432 /*
433  * HTT_MAX_SEND_QUEUE_DEPTH -
434  * How many packets HTC should allow to accumulate in a send queue
435  * before calling the EpSendFull callback to see whether to retain
436  * or drop packets.
437  * This is not relevant for LL, where tx descriptors should be immediately
438  * downloaded to the target.
439  * This is not very relevant for HL either, since it is anticipated that
440  * the HL tx download scheduler will not work this far in advance - rather,
441  * it will make its decisions just-in-time, so it can be responsive to
442  * changing conditions.
443  * Hence, this queue depth threshold spec is mostly just a formality.
444  */
445 #define HTT_MAX_SEND_QUEUE_DEPTH 64
446 
447 #define IS_PWR2(value) (((value) ^ ((value)-1)) == ((value) << 1) - 1)
448 
449 /*
450  * HTT_RX_PRE_ALLOC_POOL_SIZE -
451  * How many Rx Buffer should be there in pre-allocated pool of buffers.
452  * This is mainly for low memory condition where kernel fails to alloc
453  * SKB buffer to the Rx ring.
454  */
455 #define HTT_RX_PRE_ALLOC_POOL_SIZE 64
456 /* Max rx MSDU size including L2 headers */
457 #define MSDU_SIZE 1560
458 /* Rounding up to a cache line size. */
459 #define HTT_RX_BUF_SIZE  roundup(MSDU_SIZE +				\
460 				 sizeof(struct htt_host_rx_desc_base),	\
461 				 QDF_CACHE_LINE_SZ)
462 #define MAX_RX_PAYLOAD_SZ (HTT_RX_BUF_SIZE - RX_STD_DESC_SIZE)
463 /*
464  * DMA_MAP expects the buffer to be an integral number of cache lines.
465  * Rather than checking the actual cache line size, this code makes a
466  * conservative estimate of what the cache line size could be.
467  */
468 #define HTT_LOG2_MAX_CACHE_LINE_SIZE 7  /* 2^7 = 128 */
469 #define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
470 
471 #ifdef BIG_ENDIAN_HOST
472 /*
473  * big-endian: bytes within a 4-byte "word" are swapped:
474  * pre-swap  post-swap
475  *  index     index
476  *    0         3
477  *    1         2
478  *    2         1
479  *    3         0
480  *    4         7
481  *    5         6
482  * etc.
483  * To compute the post-swap index from the pre-swap index, compute
484  * the byte offset for the start of the word (index & ~0x3) and add
485  * the swapped byte offset within the word (3 - (index & 0x3)).
486  */
487 #define HTT_ENDIAN_BYTE_IDX_SWAP(idx) (((idx) & ~0x3) + (3 - ((idx) & 0x3)))
488 #else
489 /* little-endian: no adjustment needed */
490 #define HTT_ENDIAN_BYTE_IDX_SWAP(idx) idx
491 #endif
492 
493 #define HTT_TX_MUTEX_INIT(_mutex)			\
494 	qdf_spinlock_create(_mutex)
495 
496 #define HTT_TX_MUTEX_ACQUIRE(_mutex)			\
497 	qdf_spin_lock_bh(_mutex)
498 
499 #define HTT_TX_MUTEX_RELEASE(_mutex)			\
500 	qdf_spin_unlock_bh(_mutex)
501 
502 #define HTT_TX_MUTEX_DESTROY(_mutex)			\
503 	qdf_spinlock_destroy(_mutex)
504 
505 #ifdef ATH_11AC_TXCOMPACT
506 
507 #define HTT_TX_NBUF_QUEUE_MUTEX_INIT(_pdev)		\
508 	qdf_spinlock_create(&_pdev->txnbufq_mutex)
509 
510 #define HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(_pdev)	       \
511 	HTT_TX_MUTEX_DESTROY(&_pdev->txnbufq_mutex)
512 
513 #define HTT_TX_NBUF_QUEUE_REMOVE(_pdev, _msdu)	do {	\
514 	HTT_TX_MUTEX_ACQUIRE(&_pdev->txnbufq_mutex);	\
515 	_msdu =  qdf_nbuf_queue_remove(&_pdev->txnbufq);\
516 	HTT_TX_MUTEX_RELEASE(&_pdev->txnbufq_mutex);    \
517 	} while (0)
518 
519 #define HTT_TX_NBUF_QUEUE_ADD(_pdev, _msdu) do {	\
520 	HTT_TX_MUTEX_ACQUIRE(&_pdev->txnbufq_mutex);	\
521 	qdf_nbuf_queue_add(&_pdev->txnbufq, _msdu);     \
522 	HTT_TX_MUTEX_RELEASE(&_pdev->txnbufq_mutex);    \
523 	} while (0)
524 
525 #define HTT_TX_NBUF_QUEUE_INSERT_HEAD(_pdev, _msdu) do {   \
526 	HTT_TX_MUTEX_ACQUIRE(&_pdev->txnbufq_mutex);	   \
527 	qdf_nbuf_queue_insert_head(&_pdev->txnbufq, _msdu);\
528 	HTT_TX_MUTEX_RELEASE(&_pdev->txnbufq_mutex);       \
529 	} while (0)
530 #else
531 
532 #define HTT_TX_NBUF_QUEUE_MUTEX_INIT(_pdev)
533 #define HTT_TX_NBUF_QUEUE_REMOVE(_pdev, _msdu)
534 #define HTT_TX_NBUF_QUEUE_ADD(_pdev, _msdu)
535 #define HTT_TX_NBUF_QUEUE_INSERT_HEAD(_pdev, _msdu)
536 #define HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(_pdev)
537 
538 #endif
539 
540 #ifdef CONFIG_HL_SUPPORT
541 
/* CONFIG_HL_SUPPORT: tx resume handling is not needed; no-op stub */
static inline void htt_tx_resume_handler(void *context)
{
}
545 #else
546 
547 void htt_tx_resume_handler(void *context);
548 #endif
549 
550 #ifdef ATH_11AC_TXCOMPACT
551 #define HTT_TX_SCHED htt_tx_sched
552 #else
553 #define HTT_TX_SCHED(pdev)      /* no-op */
554 #endif
555 
556 int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems);
557 
558 void htt_tx_detach(struct htt_pdev_t *pdev);
559 
560 int htt_rx_attach(struct htt_pdev_t *pdev);
561 
562 #if defined(CONFIG_HL_SUPPORT)
563 
/* CONFIG_HL_SUPPORT: no LL rx ring resources were attached; no-op stub */
static inline void htt_rx_detach(struct htt_pdev_t *pdev)
{
}
567 #else
568 
569 void htt_rx_detach(struct htt_pdev_t *pdev);
570 #endif
571 
572 int htt_htc_attach(struct htt_pdev_t *pdev, uint16_t service_id);
573 
574 void htt_t2h_msg_handler(void *context, HTC_PACKET *pkt);
575 #ifdef WLAN_FEATURE_FASTPATH
576 void htt_t2h_msg_handler_fast(void *htt_pdev, qdf_nbuf_t *cmpl_msdus,
577 			      uint32_t num_cmpls);
578 #else
/* WLAN_FEATURE_FASTPATH disabled: fast T2H completion handler is a stub */
static inline void htt_t2h_msg_handler_fast(void *htt_pdev,
					   qdf_nbuf_t *cmpl_msdus,
					   uint32_t num_cmpls)
{
}
584 #endif
585 
586 void htt_h2t_send_complete(void *context, HTC_PACKET *pkt);
587 
588 QDF_STATUS htt_h2t_ver_req_msg(struct htt_pdev_t *pdev);
589 
590 int htt_tx_padding_credit_update_handler(void *context, int pad_credit);
591 
592 #if defined(HELIUMPLUS)
593 QDF_STATUS
594 htt_h2t_frag_desc_bank_cfg_msg(struct htt_pdev_t *pdev);
595 #endif /* defined(HELIUMPLUS) */
596 
597 QDF_STATUS htt_h2t_rx_ring_cfg_msg_ll(struct htt_pdev_t *pdev);
598 
599 QDF_STATUS htt_h2t_rx_ring_rfs_cfg_msg_ll(struct htt_pdev_t *pdev);
600 
601 QDF_STATUS htt_h2t_rx_ring_rfs_cfg_msg_hl(struct htt_pdev_t *pdev);
602 
603 QDF_STATUS htt_h2t_rx_ring_cfg_msg_hl(struct htt_pdev_t *pdev);
604 
605 extern QDF_STATUS (*htt_h2t_rx_ring_cfg_msg)(struct htt_pdev_t *pdev);
606 
607 enum htc_send_full_action htt_h2t_full(void *context, HTC_PACKET *pkt);
608 
609 struct htt_htc_pkt *htt_htc_pkt_alloc(struct htt_pdev_t *pdev);
610 
611 void htt_htc_pkt_free(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt);
612 
613 void htt_htc_pkt_pool_free(struct htt_pdev_t *pdev);
614 
615 #ifdef ATH_11AC_TXCOMPACT
616 void htt_htc_misc_pkt_list_trim(struct htt_pdev_t *pdev, int level);
617 
618 void
619 htt_htc_misc_pkt_list_add(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt);
620 
621 void htt_htc_misc_pkt_pool_free(struct htt_pdev_t *pdev);
622 #endif
623 
624 #ifdef WLAN_FULL_REORDER_OFFLOAD
625 int
626 htt_rx_hash_list_insert(struct htt_pdev_t *pdev,
627 			qdf_dma_addr_t paddr,
628 			qdf_nbuf_t netbuf);
629 #else
/* WLAN_FULL_REORDER_OFFLOAD disabled: no rx hash list is maintained */
static inline int
htt_rx_hash_list_insert(struct htt_pdev_t *pdev,
			qdf_dma_addr_t paddr,
			qdf_nbuf_t netbuf)
{
	return 0;	/* success */
}
637 #endif
638 
639 qdf_nbuf_t
640 htt_rx_hash_list_lookup(struct htt_pdev_t *pdev, qdf_dma_addr_t paddr);
641 
642 #ifdef IPA_OFFLOAD
643 int
644 htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
645 		     unsigned int uc_tx_buf_sz,
646 		     unsigned int uc_tx_buf_cnt,
647 		     unsigned int uc_tx_partition_base);
648 
649 int
650 htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev, unsigned int rx_ind_ring_size);
651 
652 int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev);
653 
654 int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev);
655 
656 #else
657 /**
658  * htt_tx_ipa_uc_attach() - attach htt ipa uc tx resource
659  * @pdev: htt context
660  * @uc_tx_buf_sz: single tx buffer size
661  * @uc_tx_buf_cnt: total tx buffer count
662  * @uc_tx_partition_base: tx buffer partition start
663  *
664  * Return: 0 success
665  */
static inline int
htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
		     unsigned int uc_tx_buf_sz,
		     unsigned int uc_tx_buf_cnt,
		     unsigned int uc_tx_partition_base)
{
	return 0;	/* IPA offload disabled: nothing to attach */
}
674 
675 /**
676  * htt_rx_ipa_uc_attach() - attach htt ipa uc rx resource
677  * @pdev: htt context
678  * @rx_ind_ring_size: rx ring size
679  *
680  * Return: 0 success
681  */
static inline int
htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev, unsigned int rx_ind_ring_size)
{
	return 0;	/* IPA offload disabled: nothing to attach */
}
687 
/* IPA offload disabled: nothing to detach */
static inline int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev)
{
	return 0;
}
692 
/* IPA offload disabled: nothing to detach */
static inline int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev)
{
	return 0;
}
697 
698 #endif /* IPA_OFFLOAD */
699 
700 /* Maximum Outstanding Bus Download */
701 #define HTT_MAX_BUS_CREDIT 33
702 
703 #ifdef CONFIG_HL_SUPPORT
704 
705 /**
706  * htt_tx_credit_update() - check for diff in bus delta and target delta
707  * @pdev: pointer to htt device.
708  *
709  * Return: min of bus delta and target delta
710  */
711 int
712 htt_tx_credit_update(struct htt_pdev_t *pdev);
713 #else
714 
/* CONFIG_HL_SUPPORT disabled: credit accounting unused; report zero delta */
static inline int
htt_tx_credit_update(struct htt_pdev_t *pdev)
{
	return 0;
}
720 #endif
721 
722 
723 #ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
724 
725 #define HTT_TX_GROUP_INDEX_OFFSET \
726 (sizeof(struct htt_txq_group) / sizeof(u_int32_t))
727 
728 void htt_tx_group_credit_process(struct htt_pdev_t *pdev, u_int32_t *msg_word);
729 #else
730 
/* Group credit flow control disabled: ignore the credit update message */
static inline
void htt_tx_group_credit_process(struct htt_pdev_t *pdev, u_int32_t *msg_word)
{
}
735 #endif
736 
737 #ifdef DEBUG_RX_RING_BUFFER
738 /**
739  * htt_rx_dbg_rxbuf_init() - init debug rx buff list
740  * @pdev: pdev handle
741  *
742  * Allocation is done from bss segment. This uses vmalloc and has a bit
743  * of an overhead compared to kmalloc (which qdf_mem_alloc wraps). The impact
744  * of the overhead to performance will need to be quantified.
745  *
746  * Return: none
747  */
/* history storage lives in the BSS segment, not on the heap */
static struct rx_buf_debug rx_buff_list_bss[HTT_RX_RING_BUFF_DBG_LIST];
static inline
void htt_rx_dbg_rxbuf_init(struct htt_pdev_t *pdev)
{
	pdev->rx_buff_list = rx_buff_list_bss;
	qdf_spinlock_create(&(pdev->rx_buff_list_lock));
	/* next history slot to write */
	pdev->rx_buff_index = 0;
	/* cumulative post/receive counters and receive-error count */
	pdev->rx_buff_posted_cum = 0;
	pdev->rx_buff_recvd_cum  = 0;
	pdev->rx_buff_recvd_err  = 0;
	/* refill-retry timer statistics */
	pdev->refill_retry_timer_starts = 0;
	pdev->refill_retry_timer_calls = 0;
	pdev->refill_retry_timer_doubles = 0;
}
762 
763 /**
764  * htt_display_rx_buf_debug() - display debug rx buff list and some counters
765  * @pdev: pdev handle
766  *
767  * Return: Success
768  */
htt_display_rx_buf_debug(struct htt_pdev_t * pdev)769 static inline int htt_display_rx_buf_debug(struct htt_pdev_t *pdev)
770 {
771 	int i;
772 	struct rx_buf_debug *buf;
773 
774 	if ((pdev) &&
775 	    (pdev->rx_buff_list)) {
776 		buf = pdev->rx_buff_list;
777 		for (i = 0; i < HTT_RX_RING_BUFF_DBG_LIST; i++) {
778 			if (buf[i].posted != 0)
779 				qdf_nofl_info("[%d][0x%x] %pK %lu %pK %llu %llu",
780 					      i, buf[i].cpu,
781 					      buf[i].nbuf_data,
782 					      (unsigned long)buf[i].paddr,
783 					      buf[i].nbuf,
784 					      buf[i].posted,
785 					      buf[i].recved);
786 		}
787 
788 		qdf_nofl_info("rxbuf_idx %d all_posted: %d all_recvd: %d recv_err: %d",
789 			      pdev->rx_buff_index,
790 			      pdev->rx_buff_posted_cum,
791 			      pdev->rx_buff_recvd_cum,
792 			      pdev->rx_buff_recvd_err);
793 
794 		qdf_nofl_info("timer kicks :%d actual  :%d restarts:%d debtors: %d fill_n: %d",
795 			      pdev->refill_retry_timer_starts,
796 			      pdev->refill_retry_timer_calls,
797 			      pdev->refill_retry_timer_doubles,
798 			      pdev->rx_buff_debt_invoked,
799 			      pdev->rx_buff_fill_n_invoked);
800 	} else
801 		return -EINVAL;
802 	return 0;
803 }
804 
805 /**
806  * htt_rx_dbg_rxbuf_set() - set element of rx buff list
807  * @pdev: pdev handle
808  * @paddr: physical address of netbuf
809  * @rx_netbuf: received netbuf
810  *
811  * Return: none
812  */
813 static inline
htt_rx_dbg_rxbuf_set(struct htt_pdev_t * pdev,qdf_dma_addr_t paddr,qdf_nbuf_t rx_netbuf)814 void htt_rx_dbg_rxbuf_set(struct htt_pdev_t *pdev, qdf_dma_addr_t paddr,
815 			  qdf_nbuf_t rx_netbuf)
816 {
817 	if (pdev->rx_buff_list) {
818 		qdf_spin_lock_bh(&(pdev->rx_buff_list_lock));
819 		pdev->rx_buff_list[pdev->rx_buff_index].paddr = paddr;
820 		pdev->rx_buff_list[pdev->rx_buff_index].nbuf  = rx_netbuf;
821 		pdev->rx_buff_list[pdev->rx_buff_index].nbuf_data =
822 							rx_netbuf->data;
823 		pdev->rx_buff_list[pdev->rx_buff_index].posted =
824 						qdf_get_log_timestamp();
825 		pdev->rx_buff_posted_cum++;
826 		pdev->rx_buff_list[pdev->rx_buff_index].recved = 0;
827 		pdev->rx_buff_list[pdev->rx_buff_index].cpu =
828 				(1 << qdf_get_cpu());
829 		QDF_NBUF_CB_RX_MAP_IDX(rx_netbuf) = pdev->rx_buff_index;
830 		if (++pdev->rx_buff_index >=
831 				HTT_RX_RING_BUFF_DBG_LIST)
832 			pdev->rx_buff_index = 0;
833 		qdf_spin_unlock_bh(&(pdev->rx_buff_list_lock));
834 	}
835 }
836 
837 /**
838  * htt_rx_dbg_rxbuf_set() - reset element of rx buff list
839  * @pdev: pdev handle
840  * @netbuf: rx sk_buff
841  * Return: none
842  */
843 static inline
htt_rx_dbg_rxbuf_reset(struct htt_pdev_t * pdev,qdf_nbuf_t netbuf)844 void htt_rx_dbg_rxbuf_reset(struct htt_pdev_t *pdev,
845 				qdf_nbuf_t netbuf)
846 {
847 	uint32_t index;
848 
849 	if (pdev->rx_buff_list) {
850 		qdf_spin_lock_bh(&(pdev->rx_buff_list_lock));
851 		index = QDF_NBUF_CB_RX_MAP_IDX(netbuf);
852 		if (index < HTT_RX_RING_BUFF_DBG_LIST) {
853 			pdev->rx_buff_list[index].recved =
854 				qdf_get_log_timestamp();
855 			pdev->rx_buff_recvd_cum++;
856 		} else {
857 			pdev->rx_buff_recvd_err++;
858 		}
859 		pdev->rx_buff_list[pdev->rx_buff_index].cpu |=
860 				(1 << qdf_get_cpu());
861 		qdf_spin_unlock_bh(&(pdev->rx_buff_list_lock));
862 	}
863 }
864 /**
865  * htt_rx_dbg_rxbuf_indupd() - add a record for alloc index update
866  * @pdev: pdev handle
867  * @idx : value of the index
868  *
869  * Return: none
870  */
871 static inline
htt_rx_dbg_rxbuf_indupd(struct htt_pdev_t * pdev,int alloc_index)872 void htt_rx_dbg_rxbuf_indupd(struct htt_pdev_t *pdev, int alloc_index)
873 {
874 	if (pdev->rx_buff_list) {
875 		qdf_spin_lock_bh(&(pdev->rx_buff_list_lock));
876 		pdev->rx_buff_list[pdev->rx_buff_index].paddr = 0;
877 		pdev->rx_buff_list[pdev->rx_buff_index].nbuf  = 0;
878 		pdev->rx_buff_list[pdev->rx_buff_index].nbuf_data = 0;
879 		pdev->rx_buff_list[pdev->rx_buff_index].posted =
880 						qdf_get_log_timestamp();
881 		pdev->rx_buff_list[pdev->rx_buff_index].recved =
882 			(uint64_t)alloc_index;
883 		pdev->rx_buff_list[pdev->rx_buff_index].cpu =
884 				(1 << qdf_get_cpu());
885 		if (++pdev->rx_buff_index >=
886 				HTT_RX_RING_BUFF_DBG_LIST)
887 			pdev->rx_buff_index = 0;
888 		qdf_spin_unlock_bh(&(pdev->rx_buff_list_lock));
889 	}
890 }
891 /**
892  * htt_rx_dbg_rxbuf_httrxind() - add a record for recipt of htt rx_ind msg
893  * @pdev: pdev handle
894  *
895  * Return: none
896  */
897 static inline
htt_rx_dbg_rxbuf_httrxind(struct htt_pdev_t * pdev,unsigned int msdu_cnt)898 void htt_rx_dbg_rxbuf_httrxind(struct htt_pdev_t *pdev, unsigned int msdu_cnt)
899 {
900 	if (pdev->rx_buff_list) {
901 		qdf_spin_lock_bh(&(pdev->rx_buff_list_lock));
902 		pdev->rx_buff_list[pdev->rx_buff_index].paddr = msdu_cnt;
903 		pdev->rx_buff_list[pdev->rx_buff_index].nbuf  = 0;
904 		pdev->rx_buff_list[pdev->rx_buff_index].nbuf_data = 0;
905 		pdev->rx_buff_list[pdev->rx_buff_index].posted =
906 						qdf_get_log_timestamp();
907 		pdev->rx_buff_list[pdev->rx_buff_index].recved =
908 			(uint64_t)0x48545452584D5347; /* 'HTTRXMSG' */
909 		pdev->rx_buff_list[pdev->rx_buff_index].cpu =
910 				(1 << qdf_get_cpu());
911 		if (++pdev->rx_buff_index >=
912 				HTT_RX_RING_BUFF_DBG_LIST)
913 			pdev->rx_buff_index = 0;
914 		qdf_spin_unlock_bh(&(pdev->rx_buff_list_lock));
915 	}
916 }
917 
918 /**
919  * htt_rx_dbg_rxbuf_deinit() - deinit debug rx buff list
920  * @pdev: pdev handle
921  *
922  * Return: none
923  */
924 static inline
htt_rx_dbg_rxbuf_deinit(struct htt_pdev_t * pdev)925 void htt_rx_dbg_rxbuf_deinit(struct htt_pdev_t *pdev)
926 {
927 	if (pdev->rx_buff_list)
928 		pdev->rx_buff_list = NULL;
929 	qdf_spinlock_destroy(&(pdev->rx_buff_list_lock));
930 }
931 #else
/* no-op stub: rx buffer debug tracking is compiled out in this build */
static inline
void htt_rx_dbg_rxbuf_init(struct htt_pdev_t *pdev)
{
}
/* no-op stub: rx buffer debug tracking compiled out; always reports success */
static inline int htt_display_rx_buf_debug(struct htt_pdev_t *pdev)
{
	return 0;
}
940 
/* no-op stub: rx buffer debug tracking is compiled out in this build */
static inline
void htt_rx_dbg_rxbuf_set(struct htt_pdev_t *pdev,
				uint32_t paddr,
				qdf_nbuf_t rx_netbuf)
{
}
/* no-op stub: rx buffer debug tracking is compiled out in this build */
static inline
void htt_rx_dbg_rxbuf_reset(struct htt_pdev_t *pdev,
				qdf_nbuf_t netbuf)
{
}
/* no-op stub: rx buffer debug tracking is compiled out in this build */
static inline
void htt_rx_dbg_rxbuf_indupd(struct htt_pdev_t *pdev,
			     int    alloc_index)
{
}
/* no-op stub: rx buffer debug tracking is compiled out in this build */
static inline
void htt_rx_dbg_rxbuf_httrxind(struct htt_pdev_t *pdev,
			       unsigned int msdu_cnt)
{
}
/* no-op stub: rx buffer debug tracking is compiled out in this build
 * (dropped the redundant bare "return;" at the end of the void body)
 */
static inline
void htt_rx_dbg_rxbuf_deinit(struct htt_pdev_t *pdev)
{
}
967 #endif
968 
/* bounds and defaults for sizing the rx buffer ring */
#ifndef HTT_RX_RING_SIZE_MIN
#define HTT_RX_RING_SIZE_MIN 128        /* slightly > than one large A-MPDU */
#endif

#ifndef HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_SIZE_MAX 2048       /* ~20 ms @ 1 Gbps of 1500B MSDUs */
#endif

#ifndef HTT_RX_RING_SIZE_1x1
#define HTT_RX_RING_SIZE_1x1 1024      /* ~20 ms @ 400 Mbps of 1500B MSDUs */
#endif

/* assumed average rx frame size, used for ring sizing calculations */
#ifndef HTT_RX_AVG_FRM_BYTES
#define HTT_RX_AVG_FRM_BYTES 1000
#endif

/* length in bytes of the FCS trailer on received frames */
#define HTT_FCS_LEN (4)

/* HTT_PKT_DUMP(x) expands to x only when data-path debug is enabled */
#ifdef HTT_DEBUG_DATA
#define HTT_PKT_DUMP(x) x
#else
#define HTT_PKT_DUMP(x) /* no-op */
#endif

#ifdef RX_HASH_DEBUG
#define HTT_RX_CHECK_MSDU_COUNT(msdu_count) HTT_ASSERT_ALWAYS(msdu_count)
#else
#define HTT_RX_CHECK_MSDU_COUNT(msdu_count)     /* no-op */
#endif

/* offset, in 32-bit words, of the "next" field within an in-order
 * paddr list element; 64-bit physical addresses occupy two words
 */
#if HTT_PADDR64
#define NEXT_FIELD_OFFSET_IN32 2
#else /* ! HTT_PADDR64 */
#define NEXT_FIELD_OFFSET_IN32 1
#endif /* HTT_PADDR64 */

/* pattern stamped into the unused high bits of rx physical addresses
 * when ENABLE_DEBUG_ADDRESS_MARKING is set (see unmark routine below)
 */
#define RX_PADDR_MAGIC_PATTERN 0xDEAD0000
1006 
1007 #if HTT_PADDR64
/*
 * htt_paddr_trim_to_37() - mask a physical address down to 37 bits
 * @paddr: physical address
 *
 * Return: the low 37 bits of @paddr; a no-op when qdf_dma_addr_t is
 * only 32 bits wide (nothing above bit 36 to clear)
 */
static inline qdf_dma_addr_t htt_paddr_trim_to_37(qdf_dma_addr_t paddr)
{
	if (sizeof(paddr) <= 4)
		return paddr;

	/* 0x1fffffffff keeps address bits [36:0] */
	return paddr & 0x1fffffffff;
}
1016 #else /* not 64 bits */
/* 32-bit address build: addresses already fit in 37 bits, nothing to trim */
static inline qdf_dma_addr_t htt_paddr_trim_to_37(qdf_dma_addr_t paddr)
{
	return paddr;
}
1021 #endif /* HTT_PADDR64 */
1022 
1023 #ifdef WLAN_FULL_REORDER_OFFLOAD
1024 #ifdef ENABLE_DEBUG_ADDRESS_MARKING
/*
 * htt_rx_paddr_unmark_high_bits() - validate and strip the debug marking
 * from the high bits of an rx physical address
 * @paddr: marked physical address
 *
 * Asserts if the expected RX_PADDR_MAGIC_PATTERN is not present in the
 * upper bits, then trims the address back to its 37 usable bits.
 *
 * Return: the unmarked physical address
 */
static inline qdf_dma_addr_t
htt_rx_paddr_unmark_high_bits(qdf_dma_addr_t paddr)
{
	uint32_t markings;

	if (sizeof(qdf_dma_addr_t) > 4) {
		/* upper 32 bits of the address; two 16-bit shifts because
		 * a single <<32 / >>32 is undefined on 32-bit builds
		 */
		markings = (uint32_t)((paddr >> 16) >> 16);
		/*
		 * check if it is marked correctly:
		 * See the mark_high_bits function above for the expected
		 * pattern.
		 * the LS 5 bits are the high bits of physical address
		 * padded (with 0b0) to 8 bits
		 */
		if ((markings & 0xFFFF0000) != RX_PADDR_MAGIC_PATTERN) {
			qdf_print("paddr not marked correctly: 0x%pK!\n",
				  (void *)paddr);
			HTT_ASSERT_ALWAYS(0);
		}

		/* clear markings  for further use */
		paddr = htt_paddr_trim_to_37(paddr);
	}
	return paddr;
}
1050 
/*
 * htt_rx_in_ord_paddr_get() - extract a buffer physical address from an
 * in-order indication element, validating the debug address marking
 * @u32p: pointer to the 32-bit word(s) holding the address
 *
 * Return: the unmarked physical address
 */
static inline
qdf_dma_addr_t htt_rx_in_ord_paddr_get(uint32_t *u32p)
{
	qdf_dma_addr_t paddr = 0;

	paddr = (qdf_dma_addr_t)HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*u32p);
	if (sizeof(qdf_dma_addr_t) > 4) {
		/* high word follows the low word in the message */
		u32p++;
		/* 32 bit architectures dont like <<32 */
		paddr |= (((qdf_dma_addr_t)
			  HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*u32p))
			  << 16 << 16);
	}
	/* verify the magic pattern and strip it from the high bits */
	paddr = htt_rx_paddr_unmark_high_bits(paddr);

	return paddr;
}
1068 #else
1069 #if HTT_PADDR64
/*
 * htt_rx_in_ord_paddr_get() - extract a buffer physical address from an
 * in-order indication element (64-bit paddr, no debug marking)
 * @u32p: pointer to the 32-bit word(s) holding the address
 *
 * Return: the physical address
 */
static inline
qdf_dma_addr_t htt_rx_in_ord_paddr_get(uint32_t *u32p)
{
	qdf_dma_addr_t paddr = 0;

	paddr = (qdf_dma_addr_t)HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*u32p);
	if (sizeof(qdf_dma_addr_t) > 4) {
		/* high word follows the low word in the message */
		u32p++;
		/* 32 bit architectures dont like <<32 */
		paddr |= (((qdf_dma_addr_t)
			  HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*u32p))
			  << 16 << 16);
	}
	return paddr;
}
1085 #else
1086 static inline
htt_rx_in_ord_paddr_get(uint32_t * u32p)1087 qdf_dma_addr_t htt_rx_in_ord_paddr_get(uint32_t *u32p)
1088 {
1089 	return HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*u32p);
1090 }
1091 #endif
1092 #endif /* ENABLE_DEBUG_ADDRESS_MARKING */
1093 
/*
 * htt_rx_in_order_ring_elems() - number of occupied entries in the rx ring
 * @pdev: pdev handle
 *
 * Return: (alloc index - target index) wrapped with the ring size mask;
 * the mask-based wrap assumes the ring size is a power of two
 */
static inline
unsigned int htt_rx_in_order_ring_elems(struct htt_pdev_t *pdev)
{
	return (*pdev->rx_ring.alloc_idx.vaddr -
		*pdev->rx_ring.target_idx.vaddr) &
		pdev->rx_ring.size_mask;
}
1101 
/*
 * htt_rx_in_order_netbuf_pop() - retrieve the netbuf matching @paddr from
 * the rx hash list and account for one consumed ring buffer
 * @pdev: pdev handle
 * @paddr: physical address from the in-order indication
 *
 * Return: the matching netbuf, or NULL if no hash entry exists for @paddr
 */
static inline qdf_nbuf_t
htt_rx_in_order_netbuf_pop(htt_pdev_handle pdev, qdf_dma_addr_t paddr)
{
	/* popping from an empty ring indicates a host/target accounting bug */
	HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
	qdf_atomic_dec(&pdev->rx_ring.fill_cnt);
	/* hash lookup is keyed on the trimmed 37-bit address */
	paddr = htt_paddr_trim_to_37(paddr);
	return htt_rx_hash_list_lookup(pdev, paddr);
}
1110 
1111 #else
/* stub: full reorder offload disabled; no in-order paddr to extract */
static inline
qdf_dma_addr_t htt_rx_in_ord_paddr_get(uint32_t *u32p)
{
	return 0;
}
1117 
/* stub: full reorder offload disabled; nothing to pop */
static inline qdf_nbuf_t
htt_rx_in_order_netbuf_pop(htt_pdev_handle pdev, qdf_dma_addr_t paddr)
{
	return NULL;
}
1123 #endif
1124 
1125 #if defined(FEATURE_MONITOR_MODE_SUPPORT) && defined(WLAN_FULL_REORDER_OFFLOAD)
1126 int htt_rx_mon_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
1127 					qdf_nbuf_t rx_ind_msg,
1128 					qdf_nbuf_t *head_msdu,
1129 					qdf_nbuf_t *tail_msdu,
1130 					uint32_t *replenish_cnt);
1131 
1132 /**
 * htt_rx_mon_get_rx_status() - Update information about the rx status,
 * which is used later to update the radiotap header.
1135  * @pdev: Pointer to pdev handle
1136  * @rx_desc: Pointer to struct htt_host_rx_desc_base
1137  * @rx_status: Return variable updated with rx_status
1138  *
1139  * Return: None
1140  */
1141 void htt_rx_mon_get_rx_status(htt_pdev_handle pdev,
1142 			      struct htt_host_rx_desc_base *rx_desc,
1143 			      struct mon_rx_status *rx_status);
1144 #else
/* stub: monitor mode or full reorder offload disabled; reports zero MSDUs
 * popped and leaves the head/tail/replenish outputs untouched
 */
static inline
int htt_rx_mon_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
					qdf_nbuf_t rx_ind_msg,
					qdf_nbuf_t *head_msdu,
					qdf_nbuf_t *tail_msdu,
					uint32_t *replenish_cnt)
{
	return 0;
}
1154 
/* stub: monitor mode disabled; rx_status is left untouched */
static inline
void htt_rx_mon_get_rx_status(htt_pdev_handle pdev,
			      struct htt_host_rx_desc_base *rx_desc,
			      struct mon_rx_status *rx_status)
{
}
1161 #endif
1162 
1163 #endif /* _HTT_INTERNAL__H_ */
1164