/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "qdf_module.h"
#include "hal_li_hw_headers.h"
#include "hal_reo.h"
#include "hal_li_reo.h"
#include "hal_li_api.h"

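/**
 * hal_get_reo_reg_base_offset_li() - Get the REO register base offset for
 *                                    Li targets
 *
 * Return: REO register base offset within the UMAC register space
 */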
uint32_t hal_get_reo_reg_base_offset_li(void)
{
	return SEQ_WCSS_UMAC_REO_REG_OFFSET;
}

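/**
 * hal_reo_qdesc_setup_li() - Set up a REO RX queue descriptor for a TID
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @tid: TID the queue belongs to
 * @ba_window_size: BlockAck window size (1 in the non-BA case)
 * @start_seq: Starting sequence number (SSN)
 * @hw_qdesc_vaddr: CPU address of the queue descriptor memory
 * @hw_qdesc_paddr: Physical address of the queue descriptor (unused here)
 * @pn_type: PN type (one of enum hal_pn_type)
 * @vdev_stats_id: vdev stats ID (unused on Li targets)
 */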
void hal_reo_qdesc_setup_li(hal_soc_handle_t hal_soc_hdl, int tid,
			    uint32_t ba_window_size,
			    uint32_t start_seq, void *hw_qdesc_vaddr,
			    qdf_dma_addr_t hw_qdesc_paddr,
			    int pn_type, uint8_t vdev_stats_id)
{
	uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr;
	uint32_t *reo_queue_ext_desc;
	uint32_t reg_val;
	uint32_t pn_enable;
	uint32_t pn_size = 0;

	qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue));

	hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER_0,
			   RESERVED_0A, 0xDDBEEF);
	/* This is just SW metadata and will be copied to the REO destination
	 * descriptors indicated by hardware.
	 * TODO: Currently setting TID in this field. See if we should set
	 * something else.
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_1,
			   RECEIVE_QUEUE_NUMBER, tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			   VLD, 1);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			   ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
			   HAL_RX_LINK_DESC_CNTR);

	/*
	 * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0
	 */

	reg_val = TID_TO_WME_AC(tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, AC, reg_val);

	if (ba_window_size < 1)
		ba_window_size = 1;

	/* WAR to get a 2K-jump exception in the non-BA case.
	 * Set the window size to 2 so that a 2K jump exception is raised
	 * when aggregates are received in the non-BA case.
	 */
	ba_window_size = hal_update_non_ba_win_size(tid, ba_window_size);

	/* Set the RTY bit for the non-BA case. Duplicate detection is
	 * currently not done by HW in the non-BA case if the RTY bit is
	 * not set.
	 * TODO: This is a temporary WAR and should be removed once a HW fix
	 * is made to check and discard duplicates even if the RTY bit is
	 * not set.
	 */
	if (ba_window_size == 1)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, RTY, 1);

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, BA_WINDOW_SIZE,
			   ba_window_size - 1);

	switch (pn_type) {
	case HAL_PN_WPA:
		pn_enable = 1;
		pn_size = PN_SIZE_48;
		break;
	case HAL_PN_WAPI_EVEN:
	case HAL_PN_WAPI_UNEVEN:
		pn_enable = 1;
		pn_size = PN_SIZE_128;
		break;
	default:
		pn_enable = 0;
		break;
	}

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_CHECK_NEEDED,
			   pn_enable);

	if (pn_type == HAL_PN_WAPI_EVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
				   PN_SHALL_BE_EVEN, 1);
	else if (pn_type == HAL_PN_WAPI_UNEVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
				   PN_SHALL_BE_UNEVEN, 1);

	/*
	 * TODO: Need to check if PN handling in SW needs to be enabled.
	 * So far this is not a requirement.
	 */

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_SIZE,
			   pn_size);

	/* TODO: Check if RX_REO_QUEUE_2_IGNORE_AMPDU_FLAG needs to be set
	 * based on BA window size and/or AMPDU capabilities
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			   IGNORE_AMPDU_FLAG, 1);

	if (start_seq <= 0xfff)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SSN,
				   start_seq);

	/* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA,
	 * but REO is not delivering packets if we set it to 1. Need to enable
	 * this once the issue is resolved.
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SVLD, 0);

	/* TODO: Check if we should set the start PN for WAPI */

	/* TODO: HW queue descriptors are currently allocated for the max BA
	 * window size for all QOS TIDs so that the same descriptor can be
	 * used later when an ADDBA request is received. This should be
	 * changed to allocate HW queue descriptors based on the BA window
	 * size being negotiated (0 for non-BA cases), reallocate when the
	 * BA window size changes, and also send a WMI message to FW to
	 * change the REO queue descriptor in the Rx peer entry as part of
	 * dp_rx_tid_update.
	 */
	if (tid == HAL_NON_QOS_TID)
		return;

	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue *)reo_queue_desc) + 1);
	qdf_mem_zero(reo_queue_ext_desc, 3 *
		     sizeof(struct rx_reo_queue_ext));
	/* Initialize first reo queue extension descriptor */
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A,
			   0xADBEEF);
	/* Initialize second reo queue extension descriptor */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A,
			   0xBDBEEF);
	/* Initialize third reo queue extension descriptor */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A,
			   0xCDBEEF);
}

qdf_export_symbol(hal_reo_qdesc_setup_li);

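/**
 * hal_get_ba_aging_timeout_li() - Read the BA aging timeout for an AC
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @ac: WME access category
 * @value: Filled with the aging threshold read from HW, divided by 1000
 */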
void hal_get_ba_aging_timeout_li(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
				 uint32_t *value)
{
	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;

	switch (ac) {
	case WME_AC_BE:
		*value = HAL_REG_READ(soc,
				      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
		break;
	case WME_AC_BK:
		*value = HAL_REG_READ(soc,
				      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
		break;
	case WME_AC_VI:
		*value = HAL_REG_READ(soc,
				      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
		break;
	case WME_AC_VO:
		*value = HAL_REG_READ(soc,
				      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid AC: %d\n", ac);
	}
}
qdf_export_symbol(hal_get_ba_aging_timeout_li);

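/**
 * hal_set_ba_aging_timeout_li() - Program the BA aging timeout for an AC
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @ac: WME access category
 * @value: Aging timeout; multiplied by 1000 before being written to HW
 */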
void hal_set_ba_aging_timeout_li(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
				 uint32_t value)
{
	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;

	switch (ac) {
	case WME_AC_BE:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
			      value * 1000);
		break;
	case WME_AC_BK:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
			      value * 1000);
		break;
	case WME_AC_VI:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
			      value * 1000);
		break;
	case WME_AC_VO:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
			      value * 1000);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid AC: %d\n", ac);
	}
}
qdf_export_symbol(hal_set_ba_aging_timeout_li);

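/**
 * hal_reo_cmd_set_descr_addr_li() - Fill the descriptor address fields of a
 *                                   REO command
 * @reo_desc: REO command descriptor (past the TLV header)
 * @type: REO command type
 * @paddr_lo: Lower 32 bits of the target descriptor physical address
 * @paddr_hi: Bits [39:32] of the target descriptor physical address
 */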
static inline void
hal_reo_cmd_set_descr_addr_li(uint32_t *reo_desc, enum hal_reo_cmd_type type,
			      uint32_t paddr_lo, uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1,
				   RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2,
				   RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1,
				   FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
				   FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1,
				   FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
				   FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1,
				   RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
				   RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid REO command type", __func__);
		break;
	}
}

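/**
 * hal_reo_cmd_queue_stats_li() - Post a GET_QUEUE_STATS command to the REO
 *                                command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: REO command parameters
 *
 * Return: REO command number of the posted command, or -EBUSY if no command
 *	   ring entry is available
 */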
static inline int
hal_reo_cmd_queue_stats_li(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
			sizeof(struct reo_get_queue_stats));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_get_queue_stats) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_GET_QUEUE_STATS,
				      cmd->std.addr_lo,
				      cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS,
			   cmd->u.stats_params.clear);

	if (hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_HAL_REO_CMD) == 0) {
		if (hif_system_pm_state_check(hal_soc->hif_handle)) {
			hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
			hal_srng_inc_flush_cnt(hal_ring_hdl);
		} else {
			hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		}

		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}

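/**
 * hal_reo_cmd_flush_queue_li() - Post a FLUSH_QUEUE command to the REO
 *                                command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: REO command parameters
 *
 * Return: REO command number of the posted command, or -EBUSY if no command
 *	   ring entry is available
 */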
static inline int
hal_reo_cmd_flush_queue_li(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
			sizeof(struct reo_flush_queue));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_queue) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_FLUSH_QUEUE,
				      cmd->std.addr_lo, cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			   BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
			   cmd->u.fl_queue_params.block_use_after_flush);

	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
				   BLOCK_RESOURCE_INDEX,
				   cmd->u.fl_queue_params.index);
	}

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}

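/**
 * hal_reo_cmd_flush_cache_li() - Post a FLUSH_CACHE command to the REO
 *                                command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: REO command parameters
 *
 * When blocking is requested, a free entry from the four HW cache blocking
 * resources is claimed via reo_res_bitmap before the command is posted.
 *
 * Return: REO command number of the posted command, or -EBUSY if no blocking
 *	   resource or command ring entry is available
 */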
static inline int
hal_reo_cmd_flush_cache_li(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		index = hal_find_zero_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			qdf_print("No blocking resource available!");
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			return -EBUSY;
		}
		hal_soc->index = index;
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		hal_srng_dump(hal_ring_handle_to_hal_srng(hal_ring_hdl));
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
			sizeof(struct reo_flush_cache));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_FLUSH_CACHE,
				      cmd->std.addr_lo, cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue);

	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);

	if (cp->block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
				   CACHE_BLOCK_RESOURCE_INDEX, index);
	}

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   BLOCK_CACHE_USAGE_AFTER_FLUSH,
			   cp->block_use_after_flush);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE,
			   cp->flush_entire_cache);

	if (hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_HAL_REO_CMD) == 0) {
		if (hif_system_pm_state_check(hal_soc->hif_handle)) {
			hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
			hal_srng_inc_flush_cnt(hal_ring_hdl);
		} else {
			hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		}

		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}

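/**
 * hal_reo_cmd_unblock_cache_li() - Post an UNBLOCK_CACHE command to the REO
 *                                  command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: REO command parameters
 *
 * Return: REO command number of the posted command, or -EBUSY if no blocking
 *	   resource is currently in use or no command ring entry is available
 */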
static inline int
hal_reo_cmd_unblock_cache_li(hal_ring_handle_t hal_ring_hdl,
			     hal_soc_handle_t hal_soc_hdl,
			     struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		index = hal_find_one_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			qdf_print("No blocking resource to unblock!");
			return -EBUSY;
		}
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
			sizeof(struct reo_unblock_cache));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_unblock_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
			   UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
				   CACHE_BLOCK_RESOURCE_INDEX,
				   cmd->u.unblk_cache_params.index);
	}

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}

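/**
 * hal_reo_cmd_flush_timeout_list_li() - Post a FLUSH_TIMEOUT_LIST command to
 *                                       the REO command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: REO command parameters
 *
 * Return: REO command number of the posted command, or -EBUSY if no command
 *	   ring entry is available
 */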
static inline int
hal_reo_cmd_flush_timeout_list_li(hal_ring_handle_t hal_ring_hdl,
				  hal_soc_handle_t hal_soc_hdl,
				  struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
			sizeof(struct reo_flush_timeout_list));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_timeout_list) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST,
			   cmd->u.fl_tim_list_params.ac_list);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
			   MINIMUM_RELEASE_DESC_COUNT,
			   cmd->u.fl_tim_list_params.min_rel_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
			   MINIMUM_FORWARD_BUF_COUNT,
			   cmd->u.fl_tim_list_params.min_fwd_buf);

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}

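/**
 * hal_reo_cmd_update_rx_queue_li() - Post an UPDATE_RX_REO_QUEUE command to
 *                                    the REO command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: REO command parameters
 *
 * Return: REO command number of the posted command, or -EBUSY if no command
 *	   ring entry is available
 */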
static inline int
hal_reo_cmd_update_rx_queue_li(hal_ring_handle_t hal_ring_hdl,
			       hal_soc_handle_t hal_soc_hdl,
			       struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_update_queue_params *p;

	p = &cmd->u.upd_queue_params;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
			sizeof(struct reo_update_rx_reo_queue));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_update_rx_reo_queue) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
				      cmd->std.addr_lo, cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD,
			   p->update_vld);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
			   p->update_assoc_link_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_DISABLE_DUPLICATE_DETECTION,
			   p->update_disable_dup_detect);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_SOFT_REORDER_ENABLE,
			   p->update_soft_reorder_enab);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_AC, p->update_ac);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_BAR, p->update_bar);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_RTY, p->update_rty);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_OOR_MODE, p->update_oor_mode);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_PN_SIZE, p->update_pn_size);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_SVLD, p->update_svld);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_SSN, p->update_ssn);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
			   p->update_seq_2k_err_detect);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_PN_VALID, p->update_pn_valid);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_PN, p->update_pn);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   RECEIVE_QUEUE_NUMBER, p->rx_queue_num);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   VLD, p->vld);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
			   p->assoc_link_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   SOFT_REORDER_ENABLE, p->soft_reorder_enab);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   BAR, p->bar);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   CHK_2K_MODE, p->chk_2k_mode);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   RTY, p->rty);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   OOR_MODE, p->oor_mode);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   PN_CHECK_NEEDED, p->pn_check_needed);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   PN_SHALL_BE_EVEN, p->pn_even);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   PN_SHALL_BE_UNEVEN, p->pn_uneven);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   PN_HANDLING_ENABLE, p->pn_hand_enab);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   IGNORE_AMPDU_FLAG, p->ignore_ampdu);

	if (p->ba_window_size < 1)
		p->ba_window_size = 1;
	/*
	 * WAR to get a 2K-jump exception in the non-BA case.
	 * Set the window size to 2 so that a 2K jump exception is raised
	 * when aggregates are received in the non-BA case.
	 */
	if (p->ba_window_size == 1)
		p->ba_window_size++;
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
			   BA_WINDOW_SIZE, p->ba_window_size - 1);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
			   PN_SIZE, p->pn_size);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
			   SVLD, p->svld);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
			   SSN, p->ssn);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
			   SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
			   PN_ERROR_DETECTED_FLAG, p->pn_err_detect);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5,
			   PN_31_0, p->pn_31_0);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6,
			   PN_63_32, p->pn_63_32);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7,
			   PN_95_64, p->pn_95_64);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8,
			   PN_127_96, p->pn_127_96);

	if (hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_HAL_REO_CMD) == 0) {
		if (hif_system_pm_state_check(hal_soc->hif_handle)) {
			hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
			hal_srng_inc_flush_cnt(hal_ring_hdl);
		} else {
			hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		}

		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}

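/**
 * hal_reo_send_cmd_li() - Post a REO command of the given type
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @hal_ring_hdl: REO command ring handle
 * @cmd: REO command type
 * @params: REO command parameters (struct hal_reo_cmd_params)
 *
 * Return: REO command number on success, -EBUSY if the command ring is
 *	   full, -EINVAL for an unknown command type.
 *
 * A minimal caller-side sketch for GET_QUEUE_STATS (illustrative only;
 * qdesc_paddr and reo_cmd_ring are placeholders for the caller's queue
 * descriptor physical address and command ring handle):
 *
 *	struct hal_reo_cmd_params params = {0};
 *	int num;
 *
 *	params.std.need_status = 1;
 *	params.std.addr_lo = qdesc_paddr & 0xffffffff;
 *	params.std.addr_hi = (uint64_t)qdesc_paddr >> 32;
 *	params.u.stats_params.clear = 0;
 *	num = hal_reo_send_cmd_li(hal_soc_hdl, reo_cmd_ring,
 *				  CMD_GET_QUEUE_STATS, &params);
 *
 * A negative num means the command was not posted; retry later.
 */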
int hal_reo_send_cmd_li(hal_soc_handle_t hal_soc_hdl,
			hal_ring_handle_t hal_ring_hdl,
			enum hal_reo_cmd_type cmd,
			void *params)
{
	struct hal_reo_cmd_params *cmd_params =
			(struct hal_reo_cmd_params *)params;
	int num = 0;

	switch (cmd) {
	case CMD_GET_QUEUE_STATS:
		num = hal_reo_cmd_queue_stats_li(hal_ring_hdl,
						 hal_soc_hdl, cmd_params);
		break;
	case CMD_FLUSH_QUEUE:
		num = hal_reo_cmd_flush_queue_li(hal_ring_hdl,
						 hal_soc_hdl, cmd_params);
		break;
	case CMD_FLUSH_CACHE:
		num = hal_reo_cmd_flush_cache_li(hal_ring_hdl,
						 hal_soc_hdl, cmd_params);
		break;
	case CMD_UNBLOCK_CACHE:
		num = hal_reo_cmd_unblock_cache_li(hal_ring_hdl,
						   hal_soc_hdl, cmd_params);
		break;
	case CMD_FLUSH_TIMEOUT_LIST:
		num = hal_reo_cmd_flush_timeout_list_li(hal_ring_hdl,
							hal_soc_hdl,
							cmd_params);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		num = hal_reo_cmd_update_rx_queue_li(hal_ring_hdl,
						     hal_soc_hdl, cmd_params);
		break;
	default:
		hal_err("Invalid REO command type: %d", cmd);
		return -EINVAL;
	}

	return num;
}

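/**
 * hal_reo_queue_stats_status_li() - Parse a REO GET_QUEUE_STATS status TLV
 * @ring_desc: REO status ring descriptor
 * @st_handle: Destination struct hal_reo_queue_status
 * @hal_soc_hdl: Opaque HAL SOC handle
 */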
void
hal_reo_queue_stats_status_li(hal_ring_desc_t ring_desc,
			      void *st_handle,
			      hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_queue_status *st =
			(struct hal_reo_queue_status *)st_handle;
	uint32_t *reo_desc = (uint32_t *)ring_desc;
	uint32_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_QUEUE_STATS_STATUS_TLV,
				  &(st->header), hal_soc);

	/* SSN */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)];
	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val);

	/* current index */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2,
				     CURRENT_INDEX)];
	st->curr_idx =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2,
			      CURRENT_INDEX, val);

	/* PN bits */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3,
				     PN_31_0)];
	st->pn_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3,
			      PN_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4,
				     PN_63_32)];
	st->pn_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4,
			      PN_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5,
				     PN_95_64)];
	st->pn_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5,
			      PN_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6,
				     PN_127_96)];
	st->pn_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6,
			      PN_127_96, val);

	/* timestamps */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7,
				     LAST_RX_ENQUEUE_TIMESTAMP)];
	st->last_rx_enq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7,
			      LAST_RX_ENQUEUE_TIMESTAMP, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8,
				     LAST_RX_DEQUEUE_TIMESTAMP)];
	st->last_rx_deq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8,
			      LAST_RX_DEQUEUE_TIMESTAMP, val);

	/* rx bitmap */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9,
				     RX_BITMAP_31_0)];
	st->rx_bitmap_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9,
			      RX_BITMAP_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10,
				     RX_BITMAP_63_32)];
	st->rx_bitmap_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10,
			      RX_BITMAP_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11,
				     RX_BITMAP_95_64)];
	st->rx_bitmap_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11,
			      RX_BITMAP_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12,
				     RX_BITMAP_127_96)];
	st->rx_bitmap_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12,
			      RX_BITMAP_127_96, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13,
				     RX_BITMAP_159_128)];
	st->rx_bitmap_159_128 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13,
			      RX_BITMAP_159_128, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14,
				     RX_BITMAP_191_160)];
	st->rx_bitmap_191_160 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14,
			      RX_BITMAP_191_160, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15,
				     RX_BITMAP_223_192)];
	st->rx_bitmap_223_192 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15,
			      RX_BITMAP_223_192, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16,
				     RX_BITMAP_255_224)];
	st->rx_bitmap_255_224 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16,
			      RX_BITMAP_255_224, val);

	/* various counts */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
				     CURRENT_MPDU_COUNT)];
	st->curr_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
				     CURRENT_MSDU_COUNT)];
	st->curr_msdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MSDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
				     TIMEOUT_COUNT)];
	st->fwd_timeout_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      TIMEOUT_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
				     FORWARD_DUE_TO_BAR_COUNT)];
	st->fwd_bar_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      FORWARD_DUE_TO_BAR_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
				     DUPLICATE_COUNT)];
	st->dup_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      DUPLICATE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
				     FRAMES_IN_ORDER_COUNT)];
	st->frms_in_order_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      FRAMES_IN_ORDER_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
				     BAR_RECEIVED_COUNT)];
	st->bar_rcvd_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      BAR_RECEIVED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20,
				     MPDU_FRAMES_PROCESSED_COUNT)];
	st->mpdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20,
			      MPDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21,
				     MSDU_FRAMES_PROCESSED_COUNT)];
	st->msdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21,
			      MSDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22,
				     TOTAL_PROCESSED_BYTE_COUNT)];
	st->total_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22,
			      TOTAL_PROCESSED_BYTE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
				     LATE_RECEIVE_MPDU_COUNT)];
	st->late_recv_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      LATE_RECEIVE_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
				     WINDOW_JUMP_2K)];
	st->win_jump_2k =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      WINDOW_JUMP_2K, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
				     HOLE_COUNT)];
	st->hole_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      HOLE_COUNT, val);
}

void
hal_reo_flush_queue_status_li(hal_ring_desc_t ring_desc,
			      void *st_handle,
			      hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_flush_queue_status *st =
			(struct hal_reo_flush_queue_status *)st_handle;
	uint32_t *reo_desc = (uint32_t *)ring_desc;
	uint32_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV,
				  &(st->header), hal_soc);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_QUEUE_STATUS_2,
				     ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
				  val);
}

void
hal_reo_flush_cache_status_li(hal_ring_desc_t ring_desc,
			      void *st_handle,
			      hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_flush_cache_status *st =
			(struct hal_reo_flush_cache_status *)st_handle;
	uint32_t *reo_desc = (uint32_t *)ring_desc;
	uint32_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV,
				  &(st->header), hal_soc);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
				     ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, ERROR_DETECTED,
				  val);

	/* block error */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
				     BLOCK_ERROR_DETAILS)];
	st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
					BLOCK_ERROR_DETAILS,
					val);
	if (!st->block_error)
		qdf_set_bit(hal_soc->index,
			    (unsigned long *)&hal_soc->reo_res_bitmap);

	/* cache flush status */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
				     CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
	st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
					CACHE_CONTROLLER_FLUSH_STATUS_HIT,
					val);

	/* cache flush descriptor type */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
				     CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
	st->cache_flush_status_desc_type =
		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
			      CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
			      val);

	/* cache flush count */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
				     CACHE_CONTROLLER_FLUSH_COUNT)];
	st->cache_flush_cnt =
		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
			      CACHE_CONTROLLER_FLUSH_COUNT,
			      val);
}

void
hal_reo_unblock_cache_status_li(hal_ring_desc_t ring_desc,
				hal_soc_handle_t hal_soc_hdl,
				void *st_handle)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_unblk_cache_status *st =
			(struct hal_reo_unblk_cache_status *)st_handle;
	uint32_t *reo_desc = (uint32_t *)ring_desc;
	uint32_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV,
				  &st->header, hal_soc);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
				     ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
				  ERROR_DETECTED,
				  val);

	/* unblock type */
	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
				     UNBLOCK_TYPE)];
	st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
					 UNBLOCK_TYPE,
					 val);

	if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
		qdf_clear_bit(hal_soc->index,
			      (unsigned long *)&hal_soc->reo_res_bitmap);
}

void hal_reo_flush_timeout_list_status_li(hal_ring_desc_t ring_desc,
					  void *st_handle,
					  hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_flush_timeout_list_status *st =
			(struct hal_reo_flush_timeout_list_status *)st_handle;
	uint32_t *reo_desc = (uint32_t *)ring_desc;
	uint32_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV,
				  &(st->header), hal_soc);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
				     ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
				  ERROR_DETECTED,
				  val);

	/* list empty */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
				     TIMOUT_LIST_EMPTY)];
	st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
				       TIMOUT_LIST_EMPTY,
				       val);

	/* release descriptor count */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
				     RELEASE_DESC_COUNT)];
	st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
					 RELEASE_DESC_COUNT,
					 val);

	/* forward buf count */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
				     FORWARD_BUF_COUNT)];
	st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
					FORWARD_BUF_COUNT,
					val);
}

void hal_reo_desc_thres_reached_status_li(hal_ring_desc_t ring_desc,
					  void *st_handle,
					  hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_desc_thres_reached_status *st =
			(struct hal_reo_desc_thres_reached_status *)st_handle;
	uint32_t *reo_desc = (uint32_t *)ring_desc;
	uint32_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(ring_desc,
				  HAL_REO_DESC_THRES_STATUS_TLV,
				  &(st->header), hal_soc);

	/* threshold index */
	val = reo_desc[HAL_OFFSET_DW(
			REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
			THRESHOLD_INDEX)];
	st->thres_index = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
				THRESHOLD_INDEX,
				val);

	/* link desc counters */
	val = reo_desc[HAL_OFFSET_DW(
			REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
			LINK_DESCRIPTOR_COUNTER0)];
	st->link_desc_counter0 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
				LINK_DESCRIPTOR_COUNTER0,
				val);

	val = reo_desc[HAL_OFFSET_DW(
			REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
			LINK_DESCRIPTOR_COUNTER1)];
	st->link_desc_counter1 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
				LINK_DESCRIPTOR_COUNTER1,
				val);

	val = reo_desc[HAL_OFFSET_DW(
			REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
			LINK_DESCRIPTOR_COUNTER2)];
	st->link_desc_counter2 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
				LINK_DESCRIPTOR_COUNTER2,
				val);

	val = reo_desc[HAL_OFFSET_DW(
			REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
			LINK_DESCRIPTOR_COUNTER_SUM)];
	st->link_desc_counter_sum = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
				LINK_DESCRIPTOR_COUNTER_SUM,
				val);
}

void
hal_reo_rx_update_queue_status_li(hal_ring_desc_t ring_desc,
				  void *st_handle,
				  hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_update_rx_queue_status *st =
			(struct hal_reo_update_rx_queue_status *)st_handle;
	uint32_t *reo_desc = (uint32_t *)ring_desc;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(ring_desc,
				  HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV,
				  &(st->header), hal_soc);
}

uint8_t hal_get_tlv_hdr_size_li(void)
{
	return sizeof(struct tlv_32_hdr);
}

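/**
 * hal_rx_get_qdesc_addr_li() - Read the REO queue descriptor address out of
 *                              a REO destination ring descriptor
 * @dst_ring_desc: REO destination ring descriptor
 * @buf: RX buffer (unused on Li targets)
 *
 * Return: the two DWORDs at the RX_REO_QUEUE_DESC_ADDR_31_0 offset, read as
 *	   one 64-bit value
 */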
uint64_t hal_rx_get_qdesc_addr_li(uint8_t *dst_ring_desc, uint8_t *buf)
{
	uint8_t *dst_qdesc_addr = dst_ring_desc +
		REO_DESTINATION_RING_6_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET;

	return *(uint64_t *)dst_qdesc_addr;
}