1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _HAL_API_H_
21 #define _HAL_API_H_
22 
23 #include "qdf_types.h"
24 #include "qdf_util.h"
25 #include "qdf_atomic.h"
26 #include "hal_internal.h"
27 #include "hif.h"
28 #include "hif_io32.h"
29 #include "qdf_platform.h"
30 
31 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
32 #include "hal_hw_headers.h"
33 #endif
34 
35 /* Ring index for WBM2SW2 release ring */
36 #define HAL_IPA_TX_COMP_RING_IDX 2
37 
38 #if defined(CONFIG_SHADOW_V2) || defined(CONFIG_SHADOW_V3)
39 #define ignore_shadow false
40 #define CHECK_SHADOW_REGISTERS true
41 #else
42 #define ignore_shadow true
43 #define CHECK_SHADOW_REGISTERS false
44 #endif
45 
46 /*
47  * Indices for stats
48  */
49 enum RING_USAGE {
50 	RING_USAGE_100,
51 	RING_USAGE_GREAT_90,
52 	RING_USAGE_70_TO_90,
53 	RING_USAGE_50_TO_70,
54 	RING_USAGE_LESS_50,
55 	RING_USAGE_MAX,
56 };
57 
58 /*
59  * Structure for tracking ring utilization
60  */
61 struct ring_util_stats {
62 	uint32_t util[RING_USAGE_MAX];
63 };
64 
65 #define RING_USAGE_100_PERCENTAGE 100
66 #define RING_USAGE_50_PERCENTAGE   50
67 #define RING_USAGE_70_PERCENTAGE   70
68 #define RING_USAGE_90_PERCENTAGE   90
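
/*
 * Illustrative sketch (not part of this header; helper name hypothetical):
 * how a measured utilization percentage could be bucketed into the
 * RING_USAGE indices above and accumulated in struct ring_util_stats:
 *
 *	static inline void ring_util_record(struct ring_util_stats *stats,
 *					    uint32_t util_pct)
 *	{
 *		if (util_pct == RING_USAGE_100_PERCENTAGE)
 *			stats->util[RING_USAGE_100]++;
 *		else if (util_pct > RING_USAGE_90_PERCENTAGE)
 *			stats->util[RING_USAGE_GREAT_90]++;
 *		else if (util_pct >= RING_USAGE_70_PERCENTAGE)
 *			stats->util[RING_USAGE_70_TO_90]++;
 *		else if (util_pct >= RING_USAGE_50_PERCENTAGE)
 *			stats->util[RING_USAGE_50_TO_70]++;
 *		else
 *			stats->util[RING_USAGE_LESS_50]++;
 *	}
 */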
69 
70 /* calculate the register address offset from bar0 of shadow register x */
71 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
72     defined(QCA_WIFI_KIWI)
73 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x000008FC
74 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
75 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
76 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
77 #elif defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCN9000)
78 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x00003024
79 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
80 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
81 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
82 #elif defined(QCA_WIFI_QCA6750)
83 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x00000504
84 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
85 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
86 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
87 #else
88 #define SHADOW_REGISTER(x) 0
89 #endif /* QCA_WIFI_QCA6390 || QCA_WIFI_QCA6490 || QCA_WIFI_QCA6750 */
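
/*
 * Worked example (illustrative): with the QCA6390/QCA6490/KIWI layout above,
 * shadow register x sits at BAR offset 0x8FC + 4 * x, so SHADOW_REGISTER(5)
 * evaluates to 0x8FC + 0x14 = 0x910. On targets with no shadow block the
 * macro collapses to 0.
 */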
90 
91 /*
92  * BAR + 4K is always accessible, any access outside this
93  * space requires force wake procedure.
94  * OFFSET = 4K - 32 bytes = 0xFE0
95  */
96 #define MAPPED_REF_OFF 0xFE0
97 
98 #define HAL_OFFSET(block, field) block ## _ ## field ## _OFFSET
99 
100 #ifdef ENABLE_VERBOSE_DEBUG
101 static inline void
102 hal_set_verbose_debug(bool flag)
103 {
104 	is_hal_verbose_debug_enabled = flag;
105 }
106 #endif
107 
108 #ifdef ENABLE_HAL_SOC_STATS
109 #define HAL_STATS_INC(_handle, _field, _delta) \
110 { \
111 	if (likely(_handle)) \
112 		_handle->stats._field += _delta; \
113 }
114 #else
115 #define HAL_STATS_INC(_handle, _field, _delta)
116 #endif
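
/*
 * Usage sketch (illustrative): HAL_STATS_INC() compiles to a no-op when
 * ENABLE_HAL_SOC_STATS is not defined, so callers can bump a counter on an
 * error path without adding their own ifdef, e.g.
 *
 *	HAL_STATS_INC(hal_soc, reg_write_fail, 1);
 *
 * as done in hal_reg_write_result_check() below.
 */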
117 
118 #ifdef ENABLE_HAL_REG_WR_HISTORY
119 #define HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, wr_val, rd_val) \
120 	hal_reg_wr_fail_history_add(hal_soc, offset, wr_val, rd_val)
121 
122 void hal_reg_wr_fail_history_add(struct hal_soc *hal_soc,
123 				 uint32_t offset,
124 				 uint32_t wr_val,
125 				 uint32_t rd_val);
126 
127 static inline int hal_history_get_next_index(qdf_atomic_t *table_index,
128 					     int array_size)
129 {
130 	int record_index = qdf_atomic_inc_return(table_index);
131 
132 	return record_index & (array_size - 1);
133 }
134 #else
135 #define HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, wr_val, rd_val) \
136 	hal_err("write failed at reg offset 0x%x, write 0x%x read 0x%x", \
137 		offset,	\
138 		wr_val,	\
139 		rd_val)
140 #endif
141 
142 /**
143  * hal_reg_write_result_check() - check register writing result
144  * @hal_soc: HAL soc handle
145  * @offset: register offset to read
146  * @exp_val: the expected value of register
147  *
148  * Return: QDF_STATUS - Success or Failure
149  */
150 static inline QDF_STATUS hal_reg_write_result_check(struct hal_soc *hal_soc,
151 						    uint32_t offset,
152 						    uint32_t exp_val)
153 {
154 	uint32_t value;
155 
156 	value = qdf_ioread32(hal_soc->dev_base_addr + offset);
157 	if (qdf_unlikely(exp_val != value)) {
158 		HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, exp_val, value);
159 		HAL_STATS_INC(hal_soc, reg_write_fail, 1);
160 
161 		return QDF_STATUS_E_FAILURE;
162 	}
163 
164 	return QDF_STATUS_SUCCESS;
165 }
166 
167 #ifdef WINDOW_REG_PLD_LOCK_ENABLE
168 static inline void hal_lock_reg_access(struct hal_soc *soc,
169 				       unsigned long *flags)
170 {
171 	pld_lock_reg_window(soc->qdf_dev->dev, flags);
172 }
173 
174 static inline void hal_unlock_reg_access(struct hal_soc *soc,
175 					 unsigned long *flags)
176 {
177 	pld_unlock_reg_window(soc->qdf_dev->dev, flags);
178 }
179 #else
180 static inline void hal_lock_reg_access(struct hal_soc *soc,
181 				       unsigned long *flags)
182 {
183 	qdf_spin_lock_irqsave(&soc->register_access_lock);
184 }
185 
186 static inline void hal_unlock_reg_access(struct hal_soc *soc,
187 					 unsigned long *flags)
188 {
189 	qdf_spin_unlock_irqrestore(&soc->register_access_lock);
190 }
191 #endif
192 
193 #ifdef PCIE_REG_WINDOW_LOCAL_NO_CACHE
194 /**
195  * hal_select_window_confirm() - write remap window register and
196  *				 check writing result
197  * @hal_soc: hal soc handle
198  * @offset: offset to write
199  *
200  */
201 static inline void hal_select_window_confirm(struct hal_soc *hal_soc,
202 					     uint32_t offset)
203 {
204 	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
205 
206 	qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
207 		      WINDOW_ENABLE_BIT | window);
208 	hal_soc->register_window = window;
209 
210 	hal_reg_write_result_check(hal_soc, WINDOW_REG_ADDRESS,
211 				   WINDOW_ENABLE_BIT | window);
212 }
213 #else
214 static inline void hal_select_window_confirm(struct hal_soc *hal_soc,
215 					     uint32_t offset)
216 {
217 	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
218 
219 	if (window != hal_soc->register_window) {
220 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
221 			      WINDOW_ENABLE_BIT | window);
222 		hal_soc->register_window = window;
223 
224 		hal_reg_write_result_check(
225 					hal_soc,
226 					WINDOW_REG_ADDRESS,
227 					WINDOW_ENABLE_BIT | window);
228 	}
229 }
230 #endif
231 
232 static inline qdf_iomem_t hal_get_window_address(struct hal_soc *hal_soc,
233 						 qdf_iomem_t addr)
234 {
235 	return hal_soc->ops->hal_get_window_address(hal_soc, addr);
236 }
237 
238 static inline void hal_tx_init_cmd_credit_ring(hal_soc_handle_t hal_soc_hdl,
239 					       hal_ring_handle_t hal_ring_hdl)
240 {
241 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
242 
243 	return hal_soc->ops->hal_tx_init_cmd_credit_ring(hal_soc_hdl,
244 							 hal_ring_hdl);
245 }
246 
247 /**
248  * hal_write32_mb() - Access registers to update configuration
249  * @hal_soc: hal soc handle
250  * @offset: offset address from the BAR
251  * @value: value to write
252  *
253  * Return: None
254  *
255  * Description: Register address space is split below:
256  *     SHADOW REGION       UNWINDOWED REGION    WINDOWED REGION
257  *  |--------------------|-------------------|------------------|
258  * BAR  NO FORCE WAKE  BAR+4K  FORCE WAKE  BAR+512K  FORCE WAKE
259  *
260  * 1. Any access to the shadow region doesn't need force wake
261  *    or windowing logic.
262  * 2. Any access beyond BAR + 4K:
263  *    If init_phase enabled, no force wake is needed and access
264  *    should be based on windowed or unwindowed access.
265  *    If init_phase disabled, force wake is needed and access
266  *    should be based on windowed or unwindowed access.
267  *
268  * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) -1
269  * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
270  * note3: WINDOW_VALUE_MASK = big enough that trying to write past
271  *                            that window would be a bug
272  */
273 #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
274     !defined(QCA_WIFI_QCA6750) && !defined(QCA_WIFI_KIWI) && \
275     !defined(QCA_WIFI_WCN6450)
276 static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
277 				  uint32_t value)
278 {
279 	unsigned long flags;
280 	qdf_iomem_t new_addr;
281 
282 	if (!hal_soc->use_register_windowing ||
283 	    offset < MAX_UNWINDOWED_ADDRESS) {
284 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
285 	} else if (hal_soc->static_window_map) {
286 		new_addr = hal_get_window_address(hal_soc,
287 				hal_soc->dev_base_addr + offset);
288 		qdf_iowrite32(new_addr, value);
289 	} else {
290 		hal_lock_reg_access(hal_soc, &flags);
291 		hal_select_window_confirm(hal_soc, offset);
292 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
293 			  (offset & WINDOW_RANGE_MASK), value);
294 		hal_unlock_reg_access(hal_soc, &flags);
295 	}
296 }
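
/*
 * Worked example (illustrative, assuming WINDOW_SHIFT is 19, consistent with
 * the 512KB unwindowed region sketched above): with windowing enabled, a
 * write to offset 0xA43010 selects window 0xA43010 >> 19 = 0x14 (masked with
 * WINDOW_VALUE_MASK) and then lands at
 * dev_base_addr + WINDOW_START + (0xA43010 & WINDOW_RANGE_MASK), i.e.
 * dev_base_addr + WINDOW_START + 0x43010.
 */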
297 
298 /**
299  * hal_write32_mb_confirm() - write register and check writing result
300  * @hal_soc: hal soc handle
301  * @offset: I/O memory address to write
302  * @value: value to write
303  *
304  * Return: QDF_STATUS - return E_NOSUPPORT as no read back confirmation
305  */
306 static inline QDF_STATUS hal_write32_mb_confirm(struct hal_soc *hal_soc,
307 						uint32_t offset,
308 						uint32_t value)
309 {
310 	hal_write32_mb(hal_soc, offset, value);
311 	return QDF_STATUS_E_NOSUPPORT;
312 }
313 
314 #define hal_write32_mb_cmem(_hal_soc, _offset, _value)
315 #else
316 static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
317 				  uint32_t value)
318 {
319 	unsigned long flags;
320 	qdf_iomem_t new_addr;
321 	bool init_phase;
322 
323 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
324 					hal_soc->hif_handle))) {
325 		hal_err_rl("target access is not allowed");
326 		return;
327 	}
328 
329 	/* Region < BAR + 4K can be directly accessed */
330 	if (offset < MAPPED_REF_OFF) {
331 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
332 		return;
333 	}
334 
335 	init_phase = hal_soc->init_phase;
336 
337 	/* Region greater than BAR + 4K */
338 	if (!init_phase && hif_force_wake_request(hal_soc->hif_handle)) {
339 		hal_err_rl("Wake up request failed");
340 		qdf_check_state_before_panic(__func__, __LINE__);
341 		return;
342 	}
343 
344 	if (!hal_soc->use_register_windowing ||
345 	    offset < MAX_UNWINDOWED_ADDRESS) {
346 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
347 	} else if (hal_soc->static_window_map) {
348 		new_addr = hal_get_window_address(
349 					hal_soc,
350 					hal_soc->dev_base_addr + offset);
351 		qdf_iowrite32(new_addr, value);
352 	} else {
353 		hal_lock_reg_access(hal_soc, &flags);
354 		hal_select_window_confirm(hal_soc, offset);
355 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
356 			  (offset & WINDOW_RANGE_MASK), value);
357 		hal_unlock_reg_access(hal_soc, &flags);
358 	}
359 
360 	if (!init_phase && hif_force_wake_release(hal_soc->hif_handle)) {
361 		hal_err("Wake up release failed");
362 		qdf_check_state_before_panic(__func__, __LINE__);
363 		return;
364 	}
365 }
366 
367 /**
368  * hal_write32_mb_confirm() - write register and check writing result
369  * @hal_soc: hal soc handle
370  * @offset: I/O memory address to write
371  * @value: value to write
372  *
373  * Return: QDF_STATUS - Success or Failure
374  */
375 static inline QDF_STATUS hal_write32_mb_confirm(struct hal_soc *hal_soc,
376 						uint32_t offset,
377 						uint32_t value)
378 {
379 	unsigned long flags;
380 	qdf_iomem_t new_addr;
381 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
382 	bool init_phase;
383 
384 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
385 					hal_soc->hif_handle))) {
386 		hal_err_rl("target access is not allowed");
387 		return status;
388 	}
389 
390 	/* Region < BAR + 4K can be directly accessed */
391 	if (offset < MAPPED_REF_OFF) {
392 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
393 		return QDF_STATUS_E_NOSUPPORT;
394 	}
395 
396 	init_phase = hal_soc->init_phase;
397 
398 	/* Region greater than BAR + 4K */
399 	if (!init_phase && hif_force_wake_request(hal_soc->hif_handle)) {
400 		hal_err("Wake up request failed");
401 		qdf_check_state_before_panic(__func__, __LINE__);
402 		return status;
403 	}
404 
405 	if (!hal_soc->use_register_windowing ||
406 	    offset < MAX_UNWINDOWED_ADDRESS) {
407 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
408 		status = hal_reg_write_result_check(hal_soc, offset,
409 						    value);
410 	} else if (hal_soc->static_window_map) {
411 		new_addr = hal_get_window_address(
412 					hal_soc,
413 					hal_soc->dev_base_addr + offset);
414 		qdf_iowrite32(new_addr, value);
415 		status = hal_reg_write_result_check(
416 					hal_soc,
417 					new_addr - hal_soc->dev_base_addr,
418 					value);
419 	} else {
420 		hal_lock_reg_access(hal_soc, &flags);
421 		hal_select_window_confirm(hal_soc, offset);
422 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
423 			  (offset & WINDOW_RANGE_MASK), value);
424 
425 		status = hal_reg_write_result_check(
426 				hal_soc,
427 				WINDOW_START + (offset & WINDOW_RANGE_MASK),
428 				value);
429 		hal_unlock_reg_access(hal_soc, &flags);
430 	}
431 
432 	if (!init_phase && hif_force_wake_release(hal_soc->hif_handle)) {
433 		hal_err("Wake up release failed");
434 		qdf_check_state_before_panic(__func__, __LINE__);
435 		return QDF_STATUS_E_INVAL;
436 	}
437 
438 	return status;
439 }
440 
441 /**
442  * hal_write32_mb_cmem() - write CMEM
443  * @hal_soc: hal soc handle
444  * @offset: offset into CMEM to write
445  * @value: value to write
446  */
447 static inline void hal_write32_mb_cmem(struct hal_soc *hal_soc, uint32_t offset,
448 				       uint32_t value)
449 {
450 	unsigned long flags;
451 	qdf_iomem_t new_addr;
452 
453 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
454 					hal_soc->hif_handle))) {
455 		hal_err_rl("%s: target access is not allowed", __func__);
456 		return;
457 	}
458 
459 	if (!hal_soc->use_register_windowing ||
460 	    offset < MAX_UNWINDOWED_ADDRESS) {
461 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
462 	} else if (hal_soc->static_window_map) {
463 		new_addr = hal_get_window_address(
464 					hal_soc,
465 					hal_soc->dev_base_addr + offset);
466 		qdf_iowrite32(new_addr, value);
467 	} else {
468 		hal_lock_reg_access(hal_soc, &flags);
469 		hal_select_window_confirm(hal_soc, offset);
470 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
471 			  (offset & WINDOW_RANGE_MASK), value);
472 		hal_unlock_reg_access(hal_soc, &flags);
473 	}
474 }
475 #endif
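
/*
 * Usage sketch (illustrative): most callers use hal_write32_mb() directly;
 * when confirmation matters, hal_write32_mb_confirm() returns a status the
 * caller can act on (QDF_STATUS_E_NOSUPPORT on builds that do not read back):
 *
 *	hal_write32_mb(hal_soc, reg_offset, val);
 *
 *	if (hal_write32_mb_confirm(hal_soc, reg_offset, val) ==
 *	    QDF_STATUS_E_FAILURE)
 *		hal_err("reg write 0x%x did not take effect", reg_offset);
 *
 * For automatic retry/recovery on failure, see
 * hal_write32_mb_confirm_retry() further below.
 */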
476 
477 /**
478  * hal_write_address_32_mb() - write a value to a register
479  * @hal_soc: hal soc handle
480  * @addr: I/O memory address to write
481  * @value: value to write
482  * @wr_confirm: true if read back confirmation is required
483  */
484 static inline
485 void hal_write_address_32_mb(struct hal_soc *hal_soc,
486 			     qdf_iomem_t addr, uint32_t value, bool wr_confirm)
487 {
488 	uint32_t offset;
489 
490 	if (!hal_soc->use_register_windowing)
491 		return qdf_iowrite32(addr, value);
492 
493 	offset = addr - hal_soc->dev_base_addr;
494 
495 	if (qdf_unlikely(wr_confirm))
496 		hal_write32_mb_confirm(hal_soc, offset, value);
497 	else
498 		hal_write32_mb(hal_soc, offset, value);
499 }
500 
501 
502 #ifdef DP_HAL_MULTIWINDOW_DIRECT_ACCESS
503 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
504 						struct hal_srng *srng,
505 						void __iomem *addr,
506 						uint32_t value)
507 {
508 	qdf_iowrite32(addr, value);
509 	hal_srng_reg_his_add(srng, value);
510 }
511 #elif defined(FEATURE_HAL_DELAYED_REG_WRITE)
512 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
513 						struct hal_srng *srng,
514 						void __iomem *addr,
515 						uint32_t value)
516 {
517 	hal_delayed_reg_write(hal_soc, srng, addr, value);
518 }
519 #else
520 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
521 						struct hal_srng *srng,
522 						void __iomem *addr,
523 						uint32_t value)
524 {
525 	hal_write_address_32_mb(hal_soc, addr, value, false);
526 	hal_srng_reg_his_add(srng, value);
527 }
528 #endif
529 
530 #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
531     !defined(QCA_WIFI_QCA6750) && !defined(QCA_WIFI_KIWI) && \
532     !defined(QCA_WIFI_WCN6450)
533 /**
534  * hal_read32_mb() - Access registers to read configuration
535  * @hal_soc: hal soc handle
536  * @offset: offset address from the BAR
537  *
538  * Description: Register address space is split below:
539  *     SHADOW REGION       UNWINDOWED REGION    WINDOWED REGION
540  *  |--------------------|-------------------|------------------|
541  * BAR  NO FORCE WAKE  BAR+4K  FORCE WAKE  BAR+512K  FORCE WAKE
542  *
543  * 1. Any access to the shadow region doesn't need force wake
544  *    or windowing logic.
545  * 2. Any access beyond BAR + 4K:
546  *    If init_phase enabled, no force wake is needed and access
547  *    should be based on windowed or unwindowed access.
548  *    If init_phase disabled, force wake is needed and access
549  *    should be based on windowed or unwindowed access.
550  *
551  * Return: value read
552  */
553 static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
554 {
555 	uint32_t ret;
556 	unsigned long flags;
557 	qdf_iomem_t new_addr;
558 
559 	if (!hal_soc->use_register_windowing ||
560 	    offset < MAX_UNWINDOWED_ADDRESS) {
561 		return qdf_ioread32(hal_soc->dev_base_addr + offset);
562 	} else if (hal_soc->static_window_map) {
563 		new_addr = hal_get_window_address(hal_soc, hal_soc->dev_base_addr + offset);
564 		return qdf_ioread32(new_addr);
565 	}
566 
567 	hal_lock_reg_access(hal_soc, &flags);
568 	hal_select_window_confirm(hal_soc, offset);
569 	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
570 		       (offset & WINDOW_RANGE_MASK));
571 	hal_unlock_reg_access(hal_soc, &flags);
572 
573 	return ret;
574 }
575 
576 #define hal_read32_mb_cmem(_hal_soc, _offset)
577 #else
578 static
579 uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
580 {
581 	uint32_t ret;
582 	unsigned long flags;
583 	qdf_iomem_t new_addr;
584 	bool init_phase;
585 
586 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
587 					hal_soc->hif_handle))) {
588 		hal_err_rl("target access is not allowed");
589 		return 0;
590 	}
591 
592 	/* Region < BAR + 4K can be directly accessed */
593 	if (offset < MAPPED_REF_OFF)
594 		return qdf_ioread32(hal_soc->dev_base_addr + offset);
595 
596 	init_phase = hal_soc->init_phase;
597 	if (!init_phase && hif_force_wake_request(hal_soc->hif_handle)) {
598 		hal_err("Wake up request failed");
599 		qdf_check_state_before_panic(__func__, __LINE__);
600 		return 0;
601 	}
602 
603 	if (!hal_soc->use_register_windowing ||
604 	    offset < MAX_UNWINDOWED_ADDRESS) {
605 		ret = qdf_ioread32(hal_soc->dev_base_addr + offset);
606 	} else if (hal_soc->static_window_map) {
607 		new_addr = hal_get_window_address(
608 					hal_soc,
609 					hal_soc->dev_base_addr + offset);
610 		ret = qdf_ioread32(new_addr);
611 	} else {
612 		hal_lock_reg_access(hal_soc, &flags);
613 		hal_select_window_confirm(hal_soc, offset);
614 		ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
615 			       (offset & WINDOW_RANGE_MASK));
616 		hal_unlock_reg_access(hal_soc, &flags);
617 	}
618 
619 	if (!init_phase && hif_force_wake_release(hal_soc->hif_handle)) {
620 		hal_err("Wake up release failed");
621 		qdf_check_state_before_panic(__func__, __LINE__);
622 		return 0;
623 	}
624 
625 	return ret;
626 }
627 
628 static inline
629 uint32_t hal_read32_mb_cmem(struct hal_soc *hal_soc, uint32_t offset)
630 {
631 	uint32_t ret;
632 	unsigned long flags;
633 	qdf_iomem_t new_addr;
634 
635 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
636 					hal_soc->hif_handle))) {
637 		hal_err_rl("%s: target access is not allowed", __func__);
638 		return 0;
639 	}
640 
641 	if (!hal_soc->use_register_windowing ||
642 	    offset < MAX_UNWINDOWED_ADDRESS) {
643 		ret = qdf_ioread32(hal_soc->dev_base_addr + offset);
644 	} else if (hal_soc->static_window_map) {
645 		new_addr = hal_get_window_address(
646 					hal_soc,
647 					hal_soc->dev_base_addr + offset);
648 		ret = qdf_ioread32(new_addr);
649 	} else {
650 		hal_lock_reg_access(hal_soc, &flags);
651 		hal_select_window_confirm(hal_soc, offset);
652 		ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
653 			       (offset & WINDOW_RANGE_MASK));
654 		hal_unlock_reg_access(hal_soc, &flags);
655 	}
656 	return ret;
657 }
658 #endif
659 
660 /* Max times allowed for register writing retry */
661 #define HAL_REG_WRITE_RETRY_MAX		5
662 /* Delay milliseconds for each time retry */
663 #define HAL_REG_WRITE_RETRY_DELAY	1
664 
665 #ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE
666 /* To check shadow config index range between 0..31 */
667 #define HAL_SHADOW_REG_INDEX_LOW 32
668 /* To check shadow config index range between 32..39 */
669 #define HAL_SHADOW_REG_INDEX_HIGH 40
670 /* Dirty bit reg offsets corresponding to shadow config index */
671 #define HAL_SHADOW_REG_DIRTY_BIT_DATA_LOW_OFFSET 0x30C8
672 #define HAL_SHADOW_REG_DIRTY_BIT_DATA_HIGH_OFFSET 0x30C4
673 /* PCIE_PCIE_TOP base addr offset */
674 #define HAL_PCIE_PCIE_TOP_WRAPPER 0x01E00000
675 /* Max retry attempts to read the dirty bit reg */
676 #ifdef HAL_CONFIG_SLUB_DEBUG_ON
677 #define HAL_SHADOW_DIRTY_BIT_POLL_MAX 10000
678 #else
679 #define HAL_SHADOW_DIRTY_BIT_POLL_MAX 2000
680 #endif
681 /* Delay in usecs for polling dirty bit reg */
682 #define HAL_SHADOW_DIRTY_BIT_POLL_DELAY 5
683 
684 /**
685  * hal_poll_dirty_bit_reg() - Poll dirty register bit to confirm
686  * write was successful
687  * @hal: hal soc handle
688  * @shadow_config_index: index of shadow reg used to confirm
689  * write
690  *
691  * Return: QDF_STATUS_SUCCESS on success
692  */
693 static inline QDF_STATUS hal_poll_dirty_bit_reg(struct hal_soc *hal,
694 						int shadow_config_index)
695 {
696 	uint32_t read_value = 0;
697 	int retry_cnt = 0;
698 	uint32_t reg_offset = 0;
699 
700 	if (shadow_config_index > 0 &&
701 	    shadow_config_index < HAL_SHADOW_REG_INDEX_LOW) {
702 		reg_offset =
703 			HAL_SHADOW_REG_DIRTY_BIT_DATA_LOW_OFFSET;
704 	} else if (shadow_config_index >= HAL_SHADOW_REG_INDEX_LOW &&
705 		   shadow_config_index < HAL_SHADOW_REG_INDEX_HIGH) {
706 		reg_offset =
707 			HAL_SHADOW_REG_DIRTY_BIT_DATA_HIGH_OFFSET;
708 	} else {
709 		hal_err("Invalid shadow_config_index = %d",
710 			shadow_config_index);
711 		return QDF_STATUS_E_INVAL;
712 	}
713 	while (retry_cnt < HAL_SHADOW_DIRTY_BIT_POLL_MAX) {
714 		read_value = hal_read32_mb(
715 				hal, HAL_PCIE_PCIE_TOP_WRAPPER + reg_offset);
716 		/* Check if dirty bit corresponding to shadow_index is set */
717 		if (read_value & BIT(shadow_config_index)) {
718 			/* Dirty reg bit not reset */
719 			qdf_udelay(HAL_SHADOW_DIRTY_BIT_POLL_DELAY);
720 			retry_cnt++;
721 		} else {
722 			hal_debug("Shadow write: offset 0x%x read val 0x%x",
723 				  reg_offset, read_value);
724 			return QDF_STATUS_SUCCESS;
725 		}
726 	}
727 	return QDF_STATUS_E_TIMEOUT;
728 }
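
/*
 * Timing note (illustrative arithmetic): the poll above spins for at most
 * HAL_SHADOW_DIRTY_BIT_POLL_MAX iterations with a
 * HAL_SHADOW_DIRTY_BIT_POLL_DELAY usec delay each, i.e. a worst case of
 * 2000 * 5 us = 10 ms (or 10000 * 5 us = 50 ms on HAL_CONFIG_SLUB_DEBUG_ON
 * builds) before QDF_STATUS_E_TIMEOUT is returned.
 */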
729 
730 /**
731  * hal_write32_mb_shadow_confirm() - write to shadow reg and
732  * poll dirty register bit to confirm write
733  * @hal: hal soc handle
734  * @reg_offset: target reg offset address from BAR
735  * @value: value to write
736  *
737  * Return: QDF_STATUS_SUCCESS on success
738  */
739 static inline QDF_STATUS hal_write32_mb_shadow_confirm(
740 	struct hal_soc *hal,
741 	uint32_t reg_offset,
742 	uint32_t value)
743 {
744 	int i;
745 	QDF_STATUS ret;
746 	uint32_t shadow_reg_offset;
747 	int shadow_config_index;
748 	bool is_reg_offset_present = false;
749 
750 	for (i = 0; i < MAX_GENERIC_SHADOW_REG; i++) {
751 		/* Found the shadow config for the reg_offset */
752 		struct shadow_reg_config *hal_shadow_reg_list =
753 			&hal->list_shadow_reg_config[i];
754 		if (hal_shadow_reg_list->target_register ==
755 			reg_offset) {
756 			shadow_config_index =
757 				hal_shadow_reg_list->shadow_config_index;
758 			shadow_reg_offset =
759 				SHADOW_REGISTER(shadow_config_index);
760 			hal_write32_mb_confirm(
761 				hal, shadow_reg_offset, value);
762 			is_reg_offset_present = true;
763 			break;
764 		}
765 		ret = QDF_STATUS_E_FAILURE;
766 	}
767 	if (is_reg_offset_present) {
768 		ret = hal_poll_dirty_bit_reg(hal, shadow_config_index);
769 		hal_info("Shadow write:reg 0x%x val 0x%x ret %d",
770 			 reg_offset, value, ret);
771 		if (QDF_IS_STATUS_ERROR(ret)) {
772 			HAL_STATS_INC(hal, shadow_reg_write_fail, 1);
773 			return ret;
774 		}
775 		HAL_STATS_INC(hal, shadow_reg_write_succ, 1);
776 	}
777 	return ret;
778 }
779 
780 /**
781  * hal_write32_mb_confirm_retry() - write register with confirming and
782  *				    do retry/recovery if writing failed
783  * @hal_soc: hal soc handle
784  * @offset: offset address from the BAR
785  * @value: value to write
786  * @recovery: is recovery needed or not.
787  *
788  * Write the register value with confirming and read it back, if
789  * read back value is not as expected, do retry for writing, if
790  * retry hit max times allowed but still fail, check if recovery
791  * needed.
792  *
793  * Return: None
794  */
795 static inline void hal_write32_mb_confirm_retry(struct hal_soc *hal_soc,
796 						uint32_t offset,
797 						uint32_t value,
798 						bool recovery)
799 {
800 	QDF_STATUS ret;
801 
802 	ret = hal_write32_mb_shadow_confirm(hal_soc, offset, value);
803 	if (QDF_IS_STATUS_ERROR(ret) && recovery)
804 		qdf_trigger_self_recovery(NULL, QDF_HAL_REG_WRITE_FAILURE);
805 }
806 #else /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
807 
808 static inline void hal_write32_mb_confirm_retry(struct hal_soc *hal_soc,
809 						uint32_t offset,
810 						uint32_t value,
811 						bool recovery)
812 {
813 	uint8_t retry_cnt = 0;
814 	uint32_t read_value;
815 	QDF_STATUS ret;
816 
817 	while (retry_cnt <= HAL_REG_WRITE_RETRY_MAX) {
818 		ret = hal_write32_mb_confirm(hal_soc, offset, value);
819 		/* Positive confirmation, return directly */
820 		if (qdf_likely(QDF_IS_STATUS_SUCCESS(ret)))
821 			return;
822 
823 		read_value = hal_read32_mb(hal_soc, offset);
824 		if (qdf_likely(read_value == value))
825 			break;
826 
827 		/* write failed, do retry */
828 		hal_warn("Retry reg offset 0x%x, value 0x%x, read value 0x%x",
829 			 offset, value, read_value);
830 		qdf_mdelay(HAL_REG_WRITE_RETRY_DELAY);
831 		retry_cnt++;
832 	}
833 
834 	if (retry_cnt > HAL_REG_WRITE_RETRY_MAX && recovery)
835 		qdf_trigger_self_recovery(NULL, QDF_HAL_REG_WRITE_FAILURE);
836 }
837 #endif /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
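
/*
 * Usage sketch (illustrative): hal_write32_mb_confirm_retry() suits registers
 * where a lost write must not go unnoticed; with recovery enabled, a write
 * that still fails after HAL_REG_WRITE_RETRY_MAX attempts triggers self
 * recovery:
 *
 *	hal_write32_mb_confirm_retry(hal_soc, reg_offset, val, true);
 *
 * On GENERIC_SHADOW_REGISTER_ACCESS_ENABLE builds the same call is routed
 * through the shadow register write and dirty-bit poll shown above.
 */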
838 
839 #if defined(FEATURE_HAL_DELAYED_REG_WRITE)
840 /**
841  * hal_dump_reg_write_srng_stats() - dump SRNG reg write stats
842  * @hal_soc_hdl: HAL soc handle
843  *
844  * Return: none
845  */
846 void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl);
847 
848 /**
849  * hal_dump_reg_write_stats() - dump reg write stats
850  * @hal_soc_hdl: HAL soc handle
851  *
852  * Return: none
853  */
854 void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl);
855 
856 /**
857  * hal_get_reg_write_pending_work() - get the number of entries
858  *		pending in the workqueue to be processed.
859  * @hal_soc: HAL soc handle
860  *
861  * Returns: the number of entries pending to be processed
862  */
863 int hal_get_reg_write_pending_work(void *hal_soc);
864 
865 #else
866 static inline void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl)
867 {
868 }
869 
870 static inline void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl)
871 {
872 }
873 
874 static inline int hal_get_reg_write_pending_work(void *hal_soc)
875 {
876 	return 0;
877 }
878 #endif
879 
880 #if defined(FEATURE_HAL_DELAYED_REG_WRITE) && defined(QCA_WIFI_QCA6750)
881 /**
882  * hal_srng_check_and_update_hptp() - Check and force update HP/TP
883  *		to the hardware
884  * @hal_soc: HAL soc handle
885  * @srng: SRNG handle
886  * @update: Whether or not update is needed
887  *
888  * Returns: void
889  */
890 void hal_srng_check_and_update_hptp(struct hal_soc *hal_soc,
891 				    struct hal_srng *srng,
892 				    bool update);
893 #else
894 static inline void
895 hal_srng_check_and_update_hptp(struct hal_soc *hal_soc, struct hal_srng *srng,
896 			       bool update)
897 {
898 }
899 #endif
900 
901 /**
902  * hal_read_address_32_mb() - Read 32-bit value from the register
903  * @soc: soc handle
904  * @addr: register address to read
905  *
906  * Return: 32-bit value
907  */
908 static inline
909 uint32_t hal_read_address_32_mb(struct hal_soc *soc,
910 				qdf_iomem_t addr)
911 {
912 	uint32_t offset;
913 	uint32_t ret;
914 
915 	if (!soc->use_register_windowing)
916 		return qdf_ioread32(addr);
917 
918 	offset = addr - soc->dev_base_addr;
919 	ret = hal_read32_mb(soc, offset);
920 	return ret;
921 }
922 
923 /**
924  * hal_attach() - Initialize HAL layer
925  * @hif_handle: Opaque HIF handle
926  * @qdf_dev: QDF device
927  *
928  * This function should be called as part of HIF initialization (for accessing
929  * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
930  *
931  * Return: Opaque HAL SOC handle
932  *		 NULL on failure (if given ring is not available)
933  */
934 void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev);
935 
936 /**
937  * hal_detach() - Detach HAL layer
938  * @hal_soc: HAL SOC handle
939  *
940  * This function should be called as part of HIF detach
941  *
942  */
943 void hal_detach(void *hal_soc);
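
/*
 * Lifecycle sketch (illustrative; error handling abbreviated): hal_attach()
 * is called from HIF initialization and the returned handle is torn down
 * from HIF detach:
 *
 *	void *hal_soc = hal_attach(hif_handle, qdf_dev);
 *
 *	if (!hal_soc)
 *		return QDF_STATUS_E_FAILURE;
 *	...
 *	hal_detach(hal_soc);
 */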
944 
945 #define HAL_SRNG_LMAC_RING 0x80000000
946 /* SRNG flags passed in hal_srng_params.flags */
947 #define HAL_SRNG_MSI_SWAP				0x00000008
948 #define HAL_SRNG_RING_PTR_SWAP			0x00000010
949 #define HAL_SRNG_DATA_TLV_SWAP			0x00000020
950 #define HAL_SRNG_LOW_THRES_INTR_ENABLE	0x00010000
951 #define HAL_SRNG_MSI_INTR				0x00020000
952 #define HAL_SRNG_CACHED_DESC		0x00040000
953 
954 #if defined(QCA_WIFI_QCA6490)  || defined(QCA_WIFI_KIWI)
955 #define HAL_SRNG_PREFETCH_TIMER 1
956 #else
957 #define HAL_SRNG_PREFETCH_TIMER 0
958 #endif
959 
960 #define PN_SIZE_24 0
961 #define PN_SIZE_48 1
962 #define PN_SIZE_128 2
963 
964 #ifdef FORCE_WAKE
965 /**
966  * hal_set_init_phase() - Indicate initialization of
967  *                        datapath rings
968  * @soc: hal_soc handle
969  * @init_phase: flag to indicate datapath rings
970  *              initialization status
971  *
972  * Return: None
973  */
974 void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase);
975 #else
976 static inline
977 void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase)
978 {
979 }
980 #endif /* FORCE_WAKE */
981 
982 /**
983  * hal_srng_get_entrysize() - Returns size of ring entry in bytes.
984  * @hal_soc: Opaque HAL SOC handle
985  * @ring_type: one of the types from hal_ring_type
986  *
987  * Should be used by callers for calculating the size of memory to be
988  * allocated before calling hal_srng_setup to setup the ring
989  *
990  * Return: ring entry size
991  */
992 uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);
993 
994 /**
995  * hal_srng_max_entries() - Returns maximum possible number of ring entries
996  * @hal_soc: Opaque HAL SOC handle
997  * @ring_type: one of the types from hal_ring_type
998  *
999  * Return: Maximum number of entries for the given ring_type
1000  */
1001 uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);
1002 
1003 void hal_set_low_threshold(hal_ring_handle_t hal_ring_hdl,
1004 				 uint32_t low_threshold);
1005 
1006 /**
1007  * hal_srng_dump() - Dump ring status
1008  * @srng: hal srng pointer
1009  */
1010 void hal_srng_dump(struct hal_srng *srng);
1011 
1012 /**
1013  * hal_srng_get_dir() - Returns the direction of the ring
1014  * @hal_soc: Opaque HAL SOC handle
1015  * @ring_type: one of the types from hal_ring_type
1016  *
1017  * Return: Ring direction
1018  */
1019 enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);
1020 
1021 /* HAL memory information */
1022 struct hal_mem_info {
1023 	/* dev base virtual addr */
1024 	void *dev_base_addr;
1025 	/* dev base physical addr */
1026 	void *dev_base_paddr;
1027 	/* dev base ce virtual addr - applicable only for qca5018  */
1028 	/* In qca5018 CE register are outside wcss block */
1029 	/* using a separate address space to access CE registers */
1030 	void *dev_base_addr_ce;
1031 	/* dev base ce physical addr */
1032 	void *dev_base_paddr_ce;
1033 	/* Remote virtual pointer memory for HW/FW updates */
1034 	void *shadow_rdptr_mem_vaddr;
1035 	/* Remote physical pointer memory for HW/FW updates */
1036 	void *shadow_rdptr_mem_paddr;
1037 	/* Shared memory for ring pointer updates from host to FW */
1038 	void *shadow_wrptr_mem_vaddr;
1039 	/* Shared physical memory for ring pointer updates from host to FW */
1040 	void *shadow_wrptr_mem_paddr;
1041 	/* lmac srng start id */
1042 	uint8_t lmac_srng_start_id;
1043 };
1044 
1045 /* SRNG parameters to be passed to hal_srng_setup */
1046 struct hal_srng_params {
1047 	/* Physical base address of the ring */
1048 	qdf_dma_addr_t ring_base_paddr;
1049 	/* Virtual base address of the ring */
1050 	void *ring_base_vaddr;
1051 	/* Number of entries in ring */
1052 	uint32_t num_entries;
1053 	/* max transfer length */
1054 	uint16_t max_buffer_length;
1055 	/* MSI Address */
1056 	qdf_dma_addr_t msi_addr;
1057 	/* MSI data */
1058 	uint32_t msi_data;
1059 	/* Interrupt timer threshold – in micro seconds */
1060 	uint32_t intr_timer_thres_us;
1061 	/* Interrupt batch counter threshold – in number of ring entries */
1062 	uint32_t intr_batch_cntr_thres_entries;
1063 	/* Low threshold – in number of ring entries
1064 	 * (valid for src rings only)
1065 	 */
1066 	uint32_t low_threshold;
1067 	/* Misc flags */
1068 	uint32_t flags;
1069 	/* Unique ring id */
1070 	uint8_t ring_id;
1071 	/* Source or Destination ring */
1072 	enum hal_srng_dir ring_dir;
1073 	/* Size of ring entry */
1074 	uint32_t entry_size;
1075 	/* hw register base address */
1076 	void *hwreg_base[MAX_SRNG_REG_GROUPS];
1077 	/* prefetch timer config - in micro seconds */
1078 	uint32_t prefetch_timer;
1079 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1080 	/* Near full IRQ support flag */
1081 	uint32_t nf_irq_support;
1082 	/* MSI2 Address */
1083 	qdf_dma_addr_t msi2_addr;
1084 	/* MSI2 data */
1085 	uint32_t msi2_data;
1086 	/* Critical threshold */
1087 	uint16_t crit_thresh;
1088 	/* High threshold */
1089 	uint16_t high_thresh;
1090 	/* Safe threshold */
1091 	uint16_t safe_thresh;
1092 #endif
1093 	/* Timer threshold to issue ring pointer update - in micro seconds */
1094 	uint16_t pointer_timer_threshold;
1095 	/* Number threshold of ring entries to issue pointer update */
1096 	uint8_t pointer_num_threshold;
1097 };
1098 
1099 /**
1100  * hal_construct_srng_shadow_regs() - initialize the shadow
1101  *                                    registers for srngs
1102  * @hal_soc: hal handle
1103  *
1104  * Return: QDF_STATUS_OK on success
1105  */
1106 QDF_STATUS hal_construct_srng_shadow_regs(void *hal_soc);
1107 
1108 /**
1109  * hal_set_one_shadow_config() - add a config for the specified ring
1110  * @hal_soc: hal handle
1111  * @ring_type: ring type
1112  * @ring_num: ring num
1113  *
1114  * The ring type and ring num uniquely specify the ring.  After this call,
1115  * the hp/tp will be added as the next entry in the shadow register
1116  * configuration table.  The hal code will use the shadow register address
1117  * in place of the hp/tp address.
1118  *
1119  * This function is exposed, so that the CE module can skip configuring shadow
1120  * registers for unused ring and rings assigned to the firmware.
1121  *
1122  * Return: QDF_STATUS_OK on success
1123  */
1124 QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
1125 				     int ring_num);
1126 
1127 /**
1128  * hal_get_shadow_config() - retrieve the config table for shadow cfg v2
1129  * @hal_soc: hal handle
1130  * @shadow_config: will point to the table after
1131  * @num_shadow_registers_configured: will contain the number of valid entries
1132  */
1133 extern void
1134 hal_get_shadow_config(void *hal_soc,
1135 		      struct pld_shadow_reg_v2_cfg **shadow_config,
1136 		      int *num_shadow_registers_configured);
1137 
1138 #ifdef CONFIG_SHADOW_V3
1139 /**
1140  * hal_get_shadow_v3_config() - retrieve the config table for shadow cfg v3
1141  * @hal_soc: hal handle
1142  * @shadow_config: will point to the table after
1143  * @num_shadow_registers_configured: will contain the number of valid entries
1144  */
1145 extern void
1146 hal_get_shadow_v3_config(void *hal_soc,
1147 			 struct pld_shadow_reg_v3_cfg **shadow_config,
1148 			 int *num_shadow_registers_configured);
1149 #endif
1150 
1151 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1152 /**
1153  * hal_srng_is_near_full_irq_supported() - Check if srng supports near full irq
1154  * @hal_soc: HAL SoC handle [To be validated by caller]
1155  * @ring_type: srng type
1156  * @ring_num: The index of the srng (of the same type)
1157  *
1158  * Return: true, if srng support near full irq trigger
1159  *	false, if the srng does not support near full irq support.
1160  */
1161 bool hal_srng_is_near_full_irq_supported(hal_soc_handle_t hal_soc,
1162 					 int ring_type, int ring_num);
1163 #else
1164 static inline
1165 bool hal_srng_is_near_full_irq_supported(hal_soc_handle_t hal_soc,
1166 					 int ring_type, int ring_num)
1167 {
1168 	return false;
1169 }
1170 #endif
1171 
1172 /**
1173  * hal_srng_setup() - Initialize HW SRNG ring.
1174  * @hal_soc: Opaque HAL SOC handle
1175  * @ring_type: one of the types from hal_ring_type
1176  * @ring_num: Ring number if there are multiple rings of
1177  *		same type (starting from 0)
1178  * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
1179  * @ring_params: SRNG ring params in hal_srng_params structure.
1180  * @idle_check: Check if ring is idle
1181  *
1182  * Callers are expected to allocate contiguous ring memory of size
1183  * 'num_entries * entry_size' bytes and pass the physical and virtual base
1184  * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
1185  * structure. Ring base address should be 8 byte aligned and size of each ring
1186  * entry should be queried using the API hal_srng_get_entrysize
1187  *
1188  * Return: Opaque pointer to ring on success
1189  *		 NULL on failure (if given ring is not available)
1190  */
1191 void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
1192 		     int mac_id, struct hal_srng_params *ring_params,
1193 		     bool idle_check);
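
/*
 * Setup sketch (illustrative; the allocation call and variable names are
 * examples only): per the comment above, the caller sizes and allocates the
 * ring memory before handing it to hal_srng_setup():
 *
 *	struct hal_srng_params params = {0};
 *	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
 *	uint32_t num_entries = hal_srng_max_entries(hal_soc, ring_type);
 *
 *	params.ring_base_vaddr = qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
 *					num_entries * entry_size,
 *					&params.ring_base_paddr);
 *	params.num_entries = num_entries;
 *	hal_ring_hdl = hal_srng_setup(hal_soc, ring_type, ring_num, mac_id,
 *				      &params, false);
 */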
1194 
1195 /**
1196  * hal_srng_setup_idx() - Initialize HW SRNG ring.
1197  * @hal_soc: Opaque HAL SOC handle
1198  * @ring_type: one of the types from hal_ring_type
1199  * @ring_num: Ring number if there are multiple rings of
1200  *		same type (starting from 0)
1201  * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
1202  * @ring_params: SRNG ring params in hal_srng_params structure.
1203  * @idle_check: Check if ring is idle
1204  * @idx: Ring index
1205  *
1206  * Callers are expected to allocate contiguous ring memory of size
1207  * 'num_entries * entry_size' bytes and pass the physical and virtual base
1208  * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
1209  * structure. Ring base address should be 8 byte aligned and size of each ring
1210  * entry should be queried using the API hal_srng_get_entrysize
1211  *
1212  * Return: Opaque pointer to ring on success
1213  *		 NULL on failure (if given ring is not available)
1214  */
1215 void *hal_srng_setup_idx(void *hal_soc, int ring_type, int ring_num,
1216 			 int mac_id, struct hal_srng_params *ring_params,
1217 			 bool idle_check, uint32_t idx);
1218 
1219 
1220 /* Remapping ids of REO rings */
1221 #define REO_REMAP_TCL 0
1222 #define REO_REMAP_SW1 1
1223 #define REO_REMAP_SW2 2
1224 #define REO_REMAP_SW3 3
1225 #define REO_REMAP_SW4 4
1226 #define REO_REMAP_RELEASE 5
1227 #define REO_REMAP_FW 6
1228 /*
1229  * In Beryllium: 4 bits REO destination ring value is defined as: 0: TCL
1230  * 1:SW1  2:SW2  3:SW3  4:SW4  5:Release  6:FW(WIFI)  7:SW5
1231  * 8:SW6 9:SW7  10:SW8  11: NOT_USED.
1232  *
1233  */
1234 #define REO_REMAP_SW5 7
1235 #define REO_REMAP_SW6 8
1236 #define REO_REMAP_SW7 9
1237 #define REO_REMAP_SW8 10
1238 
1239 /*
1240  * Macro to access HWIO_REO_R0_ERROR_DESTINATION_RING_CTRL_IX_0
1241  * to map destination to rings
1242  */
1243 #define HAL_REO_ERR_REMAP_IX0(_VALUE, _OFFSET) \
1244 	((_VALUE) << \
1245 	 (HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_0_ERROR_ ## \
1246 	  DESTINATION_RING_ ## _OFFSET ## _SHFT))
1247 
1248 /*
1249  * Macro to access HWIO_REO_R0_ERROR_DESTINATION_RING_CTRL_IX_1
1250  * to map destination to rings
1251  */
1252 #define HAL_REO_ERR_REMAP_IX1(_VALUE, _OFFSET) \
1253 	((_VALUE) << \
1254 	 (HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_1_ERROR_ ## \
1255 	  DESTINATION_RING_ ## _OFFSET ## _SHFT))
1256 
1257 /*
1258  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
1259  * to map destination to rings
1260  */
1261 #define HAL_REO_REMAP_IX0(_VALUE, _OFFSET) \
1262 	((_VALUE) << \
1263 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
1264 	  _OFFSET ## _SHFT))
1265 
1266 /*
1267  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1
1268  * to map destination to rings
1269  */
1270 #define HAL_REO_REMAP_IX2(_VALUE, _OFFSET) \
1271 	((_VALUE) << \
1272 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_DEST_RING_MAPPING_ ## \
1273 	  _OFFSET ## _SHFT))
1274 
1275 /*
1276  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3
1277  * to map destination to rings
1278  */
1279 #define HAL_REO_REMAP_IX3(_VALUE, _OFFSET) \
1280 	((_VALUE) << \
1281 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_DEST_RING_MAPPING_ ## \
1282 	  _OFFSET ## _SHFT))
1283 
1284 /**
1285  * hal_reo_read_write_ctrl_ix() - Read or write REO_DESTINATION_RING_CTRL_IX
1286  * @hal_soc_hdl: HAL SOC handle
1287  * @read: boolean value to indicate if read or write
1288  * @ix0: pointer to store IX0 reg value
1289  * @ix1: pointer to store IX1 reg value
1290  * @ix2: pointer to store IX2 reg value
1291  * @ix3: pointer to store IX3 reg value
1292  */
1293 void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
1294 				uint32_t *ix0, uint32_t *ix1,
1295 				uint32_t *ix2, uint32_t *ix3);
1296 
1297 /**
1298  * hal_srng_dst_set_hp_paddr_confirm() - Set physical address to dest SRNG head
1299  *  pointer and confirm that write went through by reading back the value
1300  * @sring: sring pointer
1301  * @paddr: physical address
1302  *
1303  * Return: None
1304  */
1305 void hal_srng_dst_set_hp_paddr_confirm(struct hal_srng *sring,
1306 				       uint64_t paddr);
1307 
1308 /**
1309  * hal_srng_dst_init_hp() - Initialize head pointer with cached head pointer
1310  * @hal_soc: hal_soc handle
1311  * @srng: sring pointer
1312  * @vaddr: virtual address
1313  */
1314 void hal_srng_dst_init_hp(struct hal_soc_handle *hal_soc,
1315 			  struct hal_srng *srng,
1316 			  uint32_t *vaddr);
1317 
1318 /**
1319  * hal_srng_dst_update_hp_addr() - Update hp_addr with current HW HP value
1320  * @hal_soc: hal_soc handle
1321  * @hal_ring_hdl: Opaque HAL SRNG pointer
1322  *
1323  * Return: None
1324  */
1325 void hal_srng_dst_update_hp_addr(struct hal_soc_handle *hal_soc,
1326 				 hal_ring_handle_t hal_ring_hdl);
1327 
1328 /**
1329  * hal_srng_cleanup() - Deinitialize HW SRNG ring.
1330  * @hal_soc: Opaque HAL SOC handle
1331  * @hal_ring_hdl: Opaque HAL SRNG pointer
1332  * @umac_reset_inprogress: UMAC reset enabled/disabled.
1333  */
1334 void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl,
1335 		      bool umac_reset_inprogress);
1336 
1337 static inline bool hal_srng_initialized(hal_ring_handle_t hal_ring_hdl)
1338 {
1339 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1340 
1341 	return !!srng->initialized;
1342 }
1343 
1344 /**
1345  * hal_srng_dst_peek() - Check if there are any entries in the ring (peek)
1346  * @hal_soc_hdl: Opaque HAL SOC handle
1347  * @hal_ring_hdl: Destination ring pointer
1348  *
1349  * Caller takes responsibility for any locking needs.
1350  *
1351  * Return: Opaque pointer for next ring entry; NULL on failure
1352  */
1353 static inline
1354 void *hal_srng_dst_peek(hal_soc_handle_t hal_soc_hdl,
1355 			hal_ring_handle_t hal_ring_hdl)
1356 {
1357 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1358 
1359 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
1360 		return (void *)(&srng->ring_base_vaddr[srng->u.dst_ring.tp]);
1361 
1362 	return NULL;
1363 }
1364 
1365 
1366 /**
1367  * hal_mem_dma_cache_sync() - Cache sync the specified virtual address Range
1368  * @soc: HAL soc handle
1369  * @desc: desc start address
1370  * @entry_size: size of memory to sync
1371  *
1372  * Return: void
1373  */
1374 #if defined(__LINUX_MIPS32_ARCH__) || defined(__LINUX_MIPS64_ARCH__)
1375 static inline void hal_mem_dma_cache_sync(struct hal_soc *soc, uint32_t *desc,
1376 					  uint32_t entry_size)
1377 {
1378 	qdf_nbuf_dma_inv_range((void *)desc, (void *)(desc + entry_size));
1379 }
1380 #else
1381 static inline void hal_mem_dma_cache_sync(struct hal_soc *soc, uint32_t *desc,
1382 					  uint32_t entry_size)
1383 {
1384 	qdf_mem_dma_cache_sync(soc->qdf_dev, qdf_mem_virt_to_phys(desc),
1385 			       QDF_DMA_FROM_DEVICE,
1386 			       (entry_size * sizeof(uint32_t)));
1387 }
1388 #endif
1389 
1390 /**
1391  * hal_srng_access_start_unlocked() - Start ring access (unlocked). Should use
1392  * hal_srng_access_start() if locked access is required
1393  * @hal_soc_hdl: Opaque HAL SOC handle
1394  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1395  *
1396  * This API doesn't implement any byte-order conversion on reading hp/tp.
1397  * So, use this API only for those srngs for which the target writes hp/tp values to
1398  * the DDR in the Host order.
1399  *
1400  * Return: 0 on success; error on failure
1401  */
1402 static inline int
1403 hal_srng_access_start_unlocked(hal_soc_handle_t hal_soc_hdl,
1404 			       hal_ring_handle_t hal_ring_hdl)
1405 {
1406 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1407 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
1408 	uint32_t *desc;
1409 
1410 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
1411 		srng->u.src_ring.cached_tp =
1412 			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
1413 	else {
1414 		srng->u.dst_ring.cached_hp =
1415 			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1416 
1417 		if (srng->flags & HAL_SRNG_CACHED_DESC) {
1418 			desc = hal_srng_dst_peek(hal_soc_hdl, hal_ring_hdl);
1419 			if (qdf_likely(desc)) {
1420 				hal_mem_dma_cache_sync(soc, desc,
1421 						       srng->entry_size);
1422 				qdf_prefetch(desc);
1423 			}
1424 		}
1425 	}
1426 
1427 	return 0;
1428 }
1429 
1430 /**
1431  * hal_le_srng_access_start_unlocked_in_cpu_order() - Start ring access
1432  * (unlocked) with endianness correction.
1433  * @hal_soc_hdl: Opaque HAL SOC handle
1434  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1435  *
1436  * This API provides the same functionality as hal_srng_access_start_unlocked()
1437  * except that it converts the little-endian formatted hp/tp values to
1438  * Host order on reading them. So, this API should only be used for those srngs
1439  * for which the target always writes hp/tp values in little-endian order
1440  * regardless of Host order.
1441  *
1442  * Also, this API doesn't take the lock. For locked access, use
1443  * hal_srng_access_start/hal_le_srng_access_start_in_cpu_order.
1444  *
1445  * Return: 0 on success; error on failure
1446  */
1447 static inline int
1448 hal_le_srng_access_start_unlocked_in_cpu_order(
1449 	hal_soc_handle_t hal_soc_hdl,
1450 	hal_ring_handle_t hal_ring_hdl)
1451 {
1452 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1453 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
1454 	uint32_t *desc;
1455 
1456 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
1457 		srng->u.src_ring.cached_tp =
1458 			qdf_le32_to_cpu(*(volatile uint32_t *)
1459 					(srng->u.src_ring.tp_addr));
1460 	else {
1461 		srng->u.dst_ring.cached_hp =
1462 			qdf_le32_to_cpu(*(volatile uint32_t *)
1463 					(srng->u.dst_ring.hp_addr));
1464 
1465 		if (srng->flags & HAL_SRNG_CACHED_DESC) {
1466 			desc = hal_srng_dst_peek(hal_soc_hdl, hal_ring_hdl);
1467 			if (qdf_likely(desc)) {
1468 				hal_mem_dma_cache_sync(soc, desc,
1469 						       srng->entry_size);
1470 				qdf_prefetch(desc);
1471 			}
1472 		}
1473 	}
1474 
1475 	return 0;
1476 }
1477 
1478 /**
1479  * hal_srng_try_access_start() - Try to start (locked) ring access
1480  * @hal_soc_hdl: Opaque HAL SOC handle
1481  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1482  *
1483  * Return: 0 on success; error on failure
1484  */
1485 static inline int hal_srng_try_access_start(hal_soc_handle_t hal_soc_hdl,
1486 					    hal_ring_handle_t hal_ring_hdl)
1487 {
1488 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1489 
1490 	if (qdf_unlikely(!hal_ring_hdl)) {
1491 		qdf_print("Error: Invalid hal_ring\n");
1492 		return -EINVAL;
1493 	}
1494 
1495 	if (!SRNG_TRY_LOCK(&(srng->lock)))
1496 		return -EINVAL;
1497 
1498 	return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl);
1499 }
1500 
1501 /**
1502  * hal_srng_access_start() - Start (locked) ring access
1503  *
1504  * @hal_soc_hdl: Opaque HAL SOC handle
1505  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1506  *
1507  * This API doesn't implement any byte-order conversion on reading hp/tp.
1508  * So, use this API only for those srngs for which the target writes hp/tp values to
1509  * the DDR in the Host order.
1510  *
1511  * Return: 0 on success; error on failure
1512  */
1513 static inline int hal_srng_access_start(hal_soc_handle_t hal_soc_hdl,
1514 					hal_ring_handle_t hal_ring_hdl)
1515 {
1516 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1517 
1518 	if (qdf_unlikely(!hal_ring_hdl)) {
1519 		qdf_print("Error: Invalid hal_ring\n");
1520 		return -EINVAL;
1521 	}
1522 
1523 	SRNG_LOCK(&(srng->lock));
1524 
1525 	return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl);
1526 }
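
/*
 * Processing sketch (illustrative; the consumer callback is hypothetical): a
 * typical destination-ring user brackets hal_srng_dst_get_next() (below) with
 * the locked access start/end pair, where hal_srng_access_end() is defined
 * further down in this header:
 *
 *	if (hal_srng_access_start(hal_soc_hdl, hal_ring_hdl))
 *		return;
 *
 *	while ((desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl)))
 *		process_entry(desc);
 *
 *	hal_srng_access_end(hal_soc, hal_ring_hdl);
 */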
1527 
1528 /**
1529  * hal_le_srng_access_start_in_cpu_order() - Start (locked) ring access with
1530  * endianness correction
1531  * @hal_soc_hdl: Opaque HAL SOC handle
1532  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1533  *
1534  * This API provides the same functionality as hal_srng_access_start()
1535  * except that it converts the little-endian formatted hp/tp values to
1536  * Host order on reading them. So, this API should only be used for those srngs
1537  * for which the target always writes hp/tp values in little-endian order
1538  * regardless of Host order.
1539  *
1540  * Return: 0 on success; error on failure
1541  */
1542 static inline int
1543 hal_le_srng_access_start_in_cpu_order(
1544 	hal_soc_handle_t hal_soc_hdl,
1545 	hal_ring_handle_t hal_ring_hdl)
1546 {
1547 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1548 
1549 	if (qdf_unlikely(!hal_ring_hdl)) {
1550 		qdf_print("Error: Invalid hal_ring\n");
1551 		return -EINVAL;
1552 	}
1553 
1554 	SRNG_LOCK(&(srng->lock));
1555 
1556 	return hal_le_srng_access_start_unlocked_in_cpu_order(
1557 			hal_soc_hdl, hal_ring_hdl);
1558 }
1559 
1560 /**
1561  * hal_srng_dst_get_next() - Get next entry from a destination ring
1562  * @hal_soc: Opaque HAL SOC handle
1563  * @hal_ring_hdl: Destination ring pointer
1564  *
1565  * Return: Opaque pointer for next ring entry; NULL on failure
1566  */
1567 static inline
1568 void *hal_srng_dst_get_next(void *hal_soc,
1569 			    hal_ring_handle_t hal_ring_hdl)
1570 {
1571 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1572 	uint32_t *desc;
1573 
1574 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
1575 		return NULL;
1576 
1577 	desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1578 	/* TODO: Using % is expensive, but we have to do this since
1579 	 * size of some SRNG rings is not power of 2 (due to descriptor
1580 	 * sizes). Need to create separate API for rings used
1581 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1582 	 * SW2RXDMA and CE rings)
1583 	 */
1584 	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size);
1585 	if (srng->u.dst_ring.tp == srng->ring_size)
1586 		srng->u.dst_ring.tp = 0;
1587 
1588 	if (srng->flags & HAL_SRNG_CACHED_DESC) {
1589 		struct hal_soc *soc = (struct hal_soc *)hal_soc;
1590 		uint32_t *desc_next;
1591 		uint32_t tp;
1592 
1593 		tp = srng->u.dst_ring.tp;
1594 		desc_next = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1595 		hal_mem_dma_cache_sync(soc, desc_next, srng->entry_size);
1596 		qdf_prefetch(desc_next);
1597 	}
1598 
1599 	return (void *)desc;
1600 }
1601 
1602 /**
1603  * hal_srng_dst_get_next_cached() - Get cached next entry
1604  * @hal_soc: Opaque HAL SOC handle
1605  * @hal_ring_hdl: Destination ring pointer
1606  *
1607  * Get next entry from a destination ring and move cached tail pointer
1608  *
1609  * Return: Opaque pointer for next ring entry; NULL on failure
1610  */
1611 static inline
1612 void *hal_srng_dst_get_next_cached(void *hal_soc,
1613 				   hal_ring_handle_t hal_ring_hdl)
1614 {
1615 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1616 	uint32_t *desc;
1617 	uint32_t *desc_next;
1618 
1619 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
1620 		return NULL;
1621 
1622 	desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1623 	/* TODO: Using % is expensive, but we have to do this since
1624 	 * size of some SRNG rings is not power of 2 (due to descriptor
1625 	 * sizes). Need to create separate API for rings used
1626 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1627 	 * SW2RXDMA and CE rings)
1628 	 */
1629 	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size);
1630 	if (srng->u.dst_ring.tp == srng->ring_size)
1631 		srng->u.dst_ring.tp = 0;
1632 
1633 	desc_next = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1634 	qdf_prefetch(desc_next);
1635 	return (void *)desc;
1636 }
1637 
1638 /**
1639  * hal_srng_dst_dec_tp() - decrement the TP of the Dst ring by one entry
1640  * @hal_soc: Opaque HAL SOC handle
1641  * @hal_ring_hdl: Destination ring pointer
1642  *
1643  * Decrements the tail pointer in the destination ring by one entry.
1644  *
1645  */
1646 static inline
1647 void hal_srng_dst_dec_tp(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1648 {
1649 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1650 
1651 	if (qdf_unlikely(!srng->u.dst_ring.tp))
1652 		srng->u.dst_ring.tp = (srng->ring_size - srng->entry_size);
1653 	else
1654 		srng->u.dst_ring.tp -= srng->entry_size;
1655 }
1656 
1657 static inline int hal_srng_lock(hal_ring_handle_t hal_ring_hdl)
1658 {
1659 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1660 
1661 	if (qdf_unlikely(!hal_ring_hdl)) {
1662 		qdf_print("error: invalid hal_ring\n");
1663 		return -EINVAL;
1664 	}
1665 
1666 	SRNG_LOCK(&(srng->lock));
1667 	return 0;
1668 }
1669 
1670 static inline int hal_srng_unlock(hal_ring_handle_t hal_ring_hdl)
1671 {
1672 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1673 
1674 	if (qdf_unlikely(!hal_ring_hdl)) {
1675 		qdf_print("error: invalid hal_ring\n");
1676 		return -EINVAL;
1677 	}
1678 
1679 	SRNG_UNLOCK(&(srng->lock));
1680 	return 0;
1681 }
1682 
1683 /**
1684  * hal_srng_dst_get_next_hp() - Get next entry from a destination ring and move
1685  *                              cached head pointer
1686  * @hal_soc_hdl: Opaque HAL SOC handle
1687  * @hal_ring_hdl: Destination ring pointer
1688  *
1689  * Return: Opaque pointer for next ring entry; NULL on failure
1690  */
1691 static inline void *
1692 hal_srng_dst_get_next_hp(hal_soc_handle_t hal_soc_hdl,
1693 			 hal_ring_handle_t hal_ring_hdl)
1694 {
1695 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1696 	uint32_t *desc;
1697 	/* TODO: Using % is expensive, but we have to do this since
1698 	 * size of some SRNG rings is not power of 2 (due to descriptor
1699 	 * sizes). Need to create separate API for rings used
1700 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1701 	 * SW2RXDMA and CE rings)
1702 	 */
1703 	uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) %
1704 		srng->ring_size;
1705 
1706 	if (next_hp != srng->u.dst_ring.tp) {
1707 		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]);
1708 		srng->u.dst_ring.cached_hp = next_hp;
1709 		return (void *)desc;
1710 	}
1711 
1712 	return NULL;
1713 }
1714 
1715 /**
1716  * hal_srng_dst_peek_sync() - Check if there are any entries in the ring (peek)
1717  * @hal_soc_hdl: Opaque HAL SOC handle
1718  * @hal_ring_hdl: Destination ring pointer
1719  *
1720  * Sync cached head pointer with HW.
1721  * Caller takes responsibility for any locking needs.
1722  *
1723  * Return: Opaque pointer for next ring entry; NULL on failure
1724  */
1725 static inline
1726 void *hal_srng_dst_peek_sync(hal_soc_handle_t hal_soc_hdl,
1727 			     hal_ring_handle_t hal_ring_hdl)
1728 {
1729 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1730 
1731 	srng->u.dst_ring.cached_hp =
1732 		*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1733 
1734 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
1735 		return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp]));
1736 
1737 	return NULL;
1738 }
1739 
1740 /**
1741  * hal_srng_dst_peek_sync_locked() - Peek for any entries in the ring
1742  * @hal_soc_hdl: Opaque HAL SOC handle
1743  * @hal_ring_hdl: Destination ring pointer
1744  *
1745  * Sync cached head pointer with HW.
1746  * This function takes up SRNG_LOCK. Should not be called with SRNG lock held.
1747  *
1748  * Return: Opaque pointer for next ring entry; NULL on failure
1749  */
1750 static inline
1751 void *hal_srng_dst_peek_sync_locked(hal_soc_handle_t hal_soc_hdl,
1752 				    hal_ring_handle_t hal_ring_hdl)
1753 {
1754 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1755 	void *ring_desc_ptr = NULL;
1756 
1757 	if (qdf_unlikely(!hal_ring_hdl)) {
1758 		qdf_print("Error: Invalid hal_ring\n");
1759 		return  NULL;
1760 	}
1761 
1762 	SRNG_LOCK(&srng->lock);
1763 
1764 	ring_desc_ptr = hal_srng_dst_peek_sync(hal_soc_hdl, hal_ring_hdl);
1765 
1766 	SRNG_UNLOCK(&srng->lock);
1767 
1768 	return ring_desc_ptr;
1769 }
1770 
1771 #define hal_srng_dst_num_valid_nolock(hal_soc, hal_ring_hdl, sync_hw_ptr) \
1772 		hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, sync_hw_ptr)
1773 
1774 /**
1775  * hal_srng_dst_num_valid() - Returns number of valid entries (to be processed
1776  *                            by SW) in destination ring
1777  * @hal_soc: Opaque HAL SOC handle
1778  * @hal_ring_hdl: Destination ring pointer
1779  * @sync_hw_ptr: Sync cached head pointer with HW
1780  *
1781  * Return: number of valid entries
1782  */
1783 static inline
1784 uint32_t hal_srng_dst_num_valid(void *hal_soc,
1785 				hal_ring_handle_t hal_ring_hdl,
1786 				int sync_hw_ptr)
1787 {
1788 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1789 	uint32_t hp;
1790 	uint32_t tp = srng->u.dst_ring.tp;
1791 
1792 	if (sync_hw_ptr) {
1793 		hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1794 		srng->u.dst_ring.cached_hp = hp;
1795 	} else {
1796 		hp = srng->u.dst_ring.cached_hp;
1797 	}
1798 
1799 	if (hp >= tp)
1800 		return (hp - tp) / srng->entry_size;
1801 
1802 	return (srng->ring_size - tp + hp) / srng->entry_size;
1803 }
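/*
 * Worked example (values assumed purely for illustration): for a destination
 * ring with ring_size = 128 dwords and entry_size = 8 dwords, if tp = 120 and
 * the (synced) hp = 16, the head has wrapped, so the count is
 * (128 - 120 + 16) / 8 = 3 valid entries awaiting SW processing.
 */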
1804 
1805 /**
1806  * hal_srng_dst_inv_cached_descs() - API to invalidate descriptors in batch mode
1807  * @hal_soc: Opaque HAL SOC handle
1808  * @hal_ring_hdl: Destination ring pointer
1809  * @entry_count: number of valid entries to invalidate
1810  *
1811  * Invalidates a set of cached descriptors starting from TP to cached_HP
1812  *
1813  * Return: HAL ring descriptor
1814  */
1815 static inline void *
1816 hal_srng_dst_inv_cached_descs(void *hal_soc,
1817 			      hal_ring_handle_t hal_ring_hdl,
1818 			      uint32_t entry_count)
1819 {
1820 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1821 	uint32_t *first_desc;
1822 	uint32_t *last_desc;
1823 	uint32_t last_desc_index;
1824 
1825 	/*
1826 	 * If SRNG does not have cached descriptors this
1827 	 * API call should be a no op
1828 	 */
1829 	if (!(srng->flags & HAL_SRNG_CACHED_DESC))
1830 		return NULL;
1831 
1832 	if (!entry_count)
1833 		return NULL;
1834 
1835 	first_desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1836 
1837 	last_desc_index = (srng->u.dst_ring.tp +
1838 			   (entry_count * srng->entry_size)) %
1839 			  srng->ring_size;
1840 
1841 	last_desc =  &srng->ring_base_vaddr[last_desc_index];
1842 
1843 	if (last_desc > (uint32_t *)first_desc)
1844 		/* invalidate from tp to cached_hp */
1845 		qdf_nbuf_dma_inv_range_no_dsb((void *)first_desc,
1846 					      (void *)(last_desc));
1847 	else {
1848 		/* invalidate from tp to end of the ring */
1849 		qdf_nbuf_dma_inv_range_no_dsb((void *)first_desc,
1850 					      (void *)srng->ring_vaddr_end);
1851 
1852 		/* invalidate from start of ring to cached_hp */
1853 		qdf_nbuf_dma_inv_range_no_dsb((void *)srng->ring_base_vaddr,
1854 					      (void *)last_desc);
1855 	}
1856 	qdf_dsb();
1857 
1858 	return last_desc;
1859 }
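/*
 * Usage sketch (hedged; assumes a caller that batch-processes a ring flagged
 * with HAL_SRNG_CACHED_DESC, with the SRNG lock already held; process_entry()
 * is a hypothetical caller-side helper):
 *
 *	num = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 1);
 *	if (num)
 *		hal_srng_dst_inv_cached_descs(hal_soc, hal_ring_hdl, num);
 *	while (num-- && (desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl)))
 *		process_entry(desc);
 */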
1860 
1861 /**
1862  * hal_srng_dst_num_valid_locked() - Returns num valid entries to be processed
1863  * @hal_soc: Opaque HAL SOC handle
1864  * @hal_ring_hdl: Destination ring pointer
1865  * @sync_hw_ptr: Sync cached head pointer with HW
1866  *
1867  * Returns number of valid entries to be processed by the host driver. The
1868  * function takes up SRNG lock.
1869  *
1870  * Return: Number of valid destination entries
1871  */
1872 static inline uint32_t
1873 hal_srng_dst_num_valid_locked(hal_soc_handle_t hal_soc,
1874 			      hal_ring_handle_t hal_ring_hdl,
1875 			      int sync_hw_ptr)
1876 {
1877 	uint32_t num_valid;
1878 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1879 
1880 	SRNG_LOCK(&srng->lock);
1881 	num_valid = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, sync_hw_ptr);
1882 	SRNG_UNLOCK(&srng->lock);
1883 
1884 	return num_valid;
1885 }
1886 
1887 /**
1888  * hal_srng_sync_cachedhp() - sync cached hp pointer from the hw hp
1889  * @hal_soc: Opaque HAL SOC handle
1890  * @hal_ring_hdl: Destination ring pointer
1891  *
1892  */
1893 static inline
1894 void hal_srng_sync_cachedhp(void *hal_soc,
1895 				hal_ring_handle_t hal_ring_hdl)
1896 {
1897 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1898 	uint32_t hp;
1899 
1900 	hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1901 	srng->u.dst_ring.cached_hp = hp;
1902 }
1903 
1904 /**
1905  * hal_srng_src_reap_next() - Reap next entry from a source ring
1906  * @hal_soc: Opaque HAL SOC handle
1907  * @hal_ring_hdl: Source ring pointer
1908  *
1909  * Reaps next entry from a source ring and moves reap pointer. This
1910  * can be used to release any buffers associated with completed ring
1911  * entries. Note that this should not be used for posting new
1912  * descriptor entries. Posting of new entries should be done only
1913  * using hal_srng_src_get_next_reaped() when this function is used for
1914  * reaping.
1915  *
1916  * Return: Opaque pointer for next ring entry; NULL on failure
1917  */
1918 static inline void *
1919 hal_srng_src_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1920 {
1921 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1922 	uint32_t *desc;
1923 
1924 	/* TODO: Using % is expensive, but we have to do this since
1925 	 * size of some SRNG rings is not power of 2 (due to descriptor
1926 	 * sizes). Need to create separate API for rings used
1927 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1928 	 * SW2RXDMA and CE rings)
1929 	 */
1930 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1931 		srng->ring_size;
1932 
1933 	if (next_reap_hp != srng->u.src_ring.cached_tp) {
1934 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
1935 		srng->u.src_ring.reap_hp = next_reap_hp;
1936 		return (void *)desc;
1937 	}
1938 
1939 	return NULL;
1940 }
1941 
1942 /**
1943  * hal_srng_src_get_next_reaped() - Get next reaped entry from a source ring
1944  * @hal_soc: Opaque HAL SOC handle
1945  * @hal_ring_hdl: Source ring pointer
1946  *
1947  * Gets next entry from a source ring that is already reaped using
1948  * hal_srng_src_reap_next(), for posting new entries to the ring
1949  *
1950  * Return: Opaque pointer for next (reaped) source ring entry; NULL on failure
1951  */
1952 static inline void *
1953 hal_srng_src_get_next_reaped(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1954 {
1955 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1956 	uint32_t *desc;
1957 
1958 	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
1959 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
1960 		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
1961 			srng->ring_size;
1962 
1963 		return (void *)desc;
1964 	}
1965 
1966 	return NULL;
1967 }
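/*
 * Usage sketch of the reap/post split (hedged; release_buffer() and
 * post_new_buffer() are hypothetical caller-side helpers, only the HAL calls
 * come from this header): completed entries are first reaped with
 * hal_srng_src_reap_next(), and the same entries are later reused for new
 * postings with hal_srng_src_get_next_reaped():
 *
 *	while ((desc = hal_srng_src_reap_next(hal_soc, hal_ring_hdl)))
 *		release_buffer(desc);
 *	...
 *	while ((desc = hal_srng_src_get_next_reaped(hal_soc, hal_ring_hdl)))
 *		post_new_buffer(desc);
 */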
1968 
1969 /**
1970  * hal_srng_src_pending_reap_next() - Reap next entry from a source ring
1971  * @hal_soc: Opaque HAL SOC handle
1972  * @hal_ring_hdl: Source ring pointer
1973  *
1974  * Reaps next entry from a source ring and move reap pointer. This API
1975  * is used in detach path to release any buffers associated with ring
1976  * entries which are pending reap.
1977  *
1978  * Return: Opaque pointer for next ring entry; NULL on failure
1979  */
1980 static inline void *
1981 hal_srng_src_pending_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1982 {
1983 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1984 	uint32_t *desc;
1985 
1986 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1987 		srng->ring_size;
1988 
1989 	if (next_reap_hp != srng->u.src_ring.hp) {
1990 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
1991 		srng->u.src_ring.reap_hp = next_reap_hp;
1992 		return (void *)desc;
1993 	}
1994 
1995 	return NULL;
1996 }
1997 
1998 /**
1999  * hal_srng_src_done_val() - Get count of consumed-but-unreaped src entries
2000  * @hal_soc: Opaque HAL SOC handle
2001  * @hal_ring_hdl: Source ring pointer
2002  *
2003  * Return: number of entries consumed by the target that are pending reap
2004  */
2005 static inline uint32_t
2006 hal_srng_src_done_val(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2007 {
2008 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2009 	/* TODO: Using % is expensive, but we have to do this since
2010 	 * size of some SRNG rings is not power of 2 (due to descriptor
2011 	 * sizes). Need to create separate API for rings used
2012 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
2013 	 * SW2RXDMA and CE rings)
2014 	 */
2015 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
2016 		srng->ring_size;
2017 
2018 	if (next_reap_hp == srng->u.src_ring.cached_tp)
2019 		return 0;
2020 
2021 	if (srng->u.src_ring.cached_tp > next_reap_hp)
2022 		return (srng->u.src_ring.cached_tp - next_reap_hp) /
2023 			srng->entry_size;
2024 	else
2025 		return ((srng->ring_size - next_reap_hp) +
2026 			srng->u.src_ring.cached_tp) / srng->entry_size;
2027 }
2028 
2029 /**
2030  * hal_get_entrysize_from_srng() - Retrieve ring entry size
2031  * @hal_ring_hdl: Source ring pointer
2032  *
2033  * srng->entry_size is in units of 4-byte dwords, so it is left-shifted
2034  * by 2 to convert the entry size to bytes.
2035  *
2036  * Return: entry size in bytes
2037  */
2038 static inline
2039 uint8_t hal_get_entrysize_from_srng(hal_ring_handle_t hal_ring_hdl)
2040 {
2041 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2042 
2043 	return srng->entry_size << 2;
2044 }
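/*
 * Example (illustrative): if srng->entry_size is 8 dwords, this returns
 * 8 << 2 = 32 bytes per ring entry.
 */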
2045 
2046 /**
2047  * hal_get_sw_hptp() - Get SW head and tail pointer location for any ring
2048  * @hal_soc: Opaque HAL SOC handle
2049  * @hal_ring_hdl: Source ring pointer
2050  * @tailp: Tail Pointer
2051  * @headp: Head Pointer
2052  *
2053  * Return: None; tail and head pointer values are returned in the arguments.
2054  */
2055 static inline
2056 void hal_get_sw_hptp(void *hal_soc, hal_ring_handle_t hal_ring_hdl,
2057 		     uint32_t *tailp, uint32_t *headp)
2058 {
2059 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2060 
2061 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
2062 		*headp = srng->u.src_ring.hp;
2063 		*tailp = *srng->u.src_ring.tp_addr;
2064 	} else {
2065 		*tailp = srng->u.dst_ring.tp;
2066 		*headp = *srng->u.dst_ring.hp_addr;
2067 	}
2068 }
2069 
2070 #if defined(CLEAR_SW2TCL_CONSUMED_DESC)
2071 /**
2072  * hal_srng_src_get_next_consumed() - Get the next desc if consumed by HW
2073  * @hal_soc: Opaque HAL SOC handle
2074  * @hal_ring_hdl: Source ring pointer
2075  *
2076  * Return: pointer to descriptor if consumed by HW, else NULL
2077  */
2078 static inline
2079 void *hal_srng_src_get_next_consumed(void *hal_soc,
2080 				     hal_ring_handle_t hal_ring_hdl)
2081 {
2082 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2083 	uint32_t *desc = NULL;
2084 	/* TODO: Using % is expensive, but we have to do this since
2085 	 * size of some SRNG rings is not power of 2 (due to descriptor
2086 	 * sizes). Need to create separate API for rings used
2087 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
2088 	 * SW2RXDMA and CE rings)
2089 	 */
2090 	uint32_t next_entry = (srng->last_desc_cleared + srng->entry_size) %
2091 			      srng->ring_size;
2092 
2093 	if (next_entry != srng->u.src_ring.cached_tp) {
2094 		desc = &srng->ring_base_vaddr[next_entry];
2095 		srng->last_desc_cleared = next_entry;
2096 	}
2097 
2098 	return desc;
2099 }
2100 
2101 #else
2102 static inline
2103 void *hal_srng_src_get_next_consumed(void *hal_soc,
2104 				     hal_ring_handle_t hal_ring_hdl)
2105 {
2106 	return NULL;
2107 }
2108 #endif /* CLEAR_SW2TCL_CONSUMED_DESC */
2109 
2110 /**
2111  * hal_srng_src_peek() - get the HP of the SRC ring
2112  * @hal_soc: Opaque HAL SOC handle
2113  * @hal_ring_hdl: Source ring pointer
2114  *
2115  * get the head pointer in the src ring but do not increment it
2116  *
2117  * Return: head descriptor
2118  */
2119 static inline
2120 void *hal_srng_src_peek(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2121 {
2122 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2123 	uint32_t *desc;
2124 	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
2125 		srng->ring_size;
2126 
2127 	if (next_hp != srng->u.src_ring.cached_tp) {
2128 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
2129 		return (void *)desc;
2130 	}
2131 
2132 	return NULL;
2133 }
2134 
2135 /**
2136  * hal_srng_src_get_next() - Get next entry from a source ring and move cached
2137  *                           tail pointer
2138  * @hal_soc: Opaque HAL SOC handle
2139  * @hal_ring_hdl: Source ring pointer
2140  *
2141  * Return: Opaque pointer for next ring entry; NULL on failure
2142  */
2143 static inline
2144 void *hal_srng_src_get_next(void *hal_soc,
2145 			    hal_ring_handle_t hal_ring_hdl)
2146 {
2147 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2148 	uint32_t *desc;
2149 	/* TODO: Using % is expensive, but we have to do this since
2150 	 * size of some SRNG rings is not power of 2 (due to descriptor
2151 	 * sizes). Need to create separate API for rings used
2152 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
2153 	 * SW2RXDMA and CE rings)
2154 	 */
2155 	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
2156 		srng->ring_size;
2157 
2158 	if (next_hp != srng->u.src_ring.cached_tp) {
2159 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
2160 		srng->u.src_ring.hp = next_hp;
2161 		/* TODO: Since reap function is not used by all rings, we can
2162 		 * remove the following update of reap_hp in this function
2163 		 * if we can ensure that only hal_srng_src_get_next_reaped
2164 		 * is used for the rings requiring reap functionality
2165 		 */
2166 		srng->u.src_ring.reap_hp = next_hp;
2167 		return (void *)desc;
2168 	}
2169 
2170 	return NULL;
2171 }
2172 
2173 /**
2174  * hal_srng_src_peek_n_get_next() - Get next entry from a ring without
2175  *                                  moving head pointer.
2176  * @hal_soc_hdl: Opaque HAL SOC handle
2177  * @hal_ring_hdl: Source ring pointer
2178  *
2179  * hal_srng_src_get_next should be called subsequently to move the head pointer
2180  *
2181  * Return: Opaque pointer for next ring entry; NULL on failure
2182  */
2183 static inline
2184 void *hal_srng_src_peek_n_get_next(hal_soc_handle_t hal_soc_hdl,
2185 				   hal_ring_handle_t hal_ring_hdl)
2186 {
2187 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2188 	uint32_t *desc;
2189 
2190 	/* TODO: Using % is expensive, but we have to do this since
2191 	 * size of some SRNG rings is not power of 2 (due to descriptor
2192 	 * sizes). Need to create separate API for rings used
2193 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
2194 	 * SW2RXDMA and CE rings)
2195 	 */
2196 	if (((srng->u.src_ring.hp + srng->entry_size) %
2197 		srng->ring_size) != srng->u.src_ring.cached_tp) {
2198 		desc = &(srng->ring_base_vaddr[(srng->u.src_ring.hp +
2199 						srng->entry_size) %
2200 						srng->ring_size]);
2201 		return (void *)desc;
2202 	}
2203 
2204 	return NULL;
2205 }
2206 
2207 /**
2208  * hal_srng_src_dec_hp() - Decrement source srng HP to previous index
2209  * @hal_soc_hdl: Opaque HAL SOC handle
2210  * @hal_ring_hdl: Source ring pointer
2211  *
2212  * Return: None
2213  */
2214 static inline
2215 void hal_srng_src_dec_hp(hal_soc_handle_t hal_soc_hdl,
2216 			 hal_ring_handle_t hal_ring_hdl)
2217 {
2218 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2219 	uint32_t hp = srng->u.src_ring.hp;
2220 
2221 	/* This HP adjustment is mostly done in error cases.
2222 	 * Only the local HP is decremented, not the value
2223 	 * communicated to the consumer or HW.
2224 	 */
2225 	if (hp == srng->u.src_ring.cached_tp)
2226 		return;
2227 	else if (hp == 0)
2228 		hp = srng->ring_size - srng->entry_size;
2229 	else
2230 		hp = (hp - srng->entry_size) % srng->ring_size;
2231 
2232 	srng->u.src_ring.hp = hp;
2233 }
2234 
2235 /**
2236  * hal_srng_src_peek_n_get_next_next() - Get next to next, i.e HP + 2 entry from
2237  *                                       a ring without moving head pointer.
2238  * @hal_soc_hdl: Opaque HAL SOC handle
2239  * @hal_ring_hdl: Source ring pointer
2240  *
2241  * Return: Opaque pointer for next to next ring entry; NULL on failure
2242  */
2243 static inline
2244 void *hal_srng_src_peek_n_get_next_next(hal_soc_handle_t hal_soc_hdl,
2245 					hal_ring_handle_t hal_ring_hdl)
2246 {
2247 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2248 	uint32_t *desc;
2249 
2250 	/* TODO: Using % is expensive, but we have to do this since
2251 	 * size of some SRNG rings is not power of 2 (due to descriptor
2252 	 * sizes). Need to create separate API for rings used
2253 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
2254 	 * SW2RXDMA and CE rings)
2255 	 */
2256 	if ((((srng->u.src_ring.hp + (srng->entry_size)) %
2257 		srng->ring_size) != srng->u.src_ring.cached_tp) &&
2258 	    (((srng->u.src_ring.hp + (srng->entry_size * 2)) %
2259 		srng->ring_size) != srng->u.src_ring.cached_tp)) {
2260 		desc = &(srng->ring_base_vaddr[(srng->u.src_ring.hp +
2261 						(srng->entry_size * 2)) %
2262 						srng->ring_size]);
2263 		return (void *)desc;
2264 	}
2265 
2266 	return NULL;
2267 }
2268 
2269 /**
2270  * hal_srng_src_get_cur_hp_n_move_next() - Return the current hp and move
2271  *                                         hp to the next entry in src ring
2272  * @hal_soc_hdl: HAL soc handle
2273  * @hal_ring_hdl: Source ring pointer
2274  *
2275  * This API should be used only for init-time replenish.
2276  */
2277 static inline void *
2278 hal_srng_src_get_cur_hp_n_move_next(hal_soc_handle_t hal_soc_hdl,
2279 				    hal_ring_handle_t hal_ring_hdl)
2280 {
2281 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2282 	uint32_t *cur_desc = NULL;
2283 	uint32_t next_hp;
2284 
2285 	cur_desc = &srng->ring_base_vaddr[(srng->u.src_ring.hp)];
2286 
2287 	next_hp = (srng->u.src_ring.hp + srng->entry_size) %
2288 		srng->ring_size;
2289 
2290 	if (next_hp != srng->u.src_ring.cached_tp)
2291 		srng->u.src_ring.hp = next_hp;
2292 
2293 	return (void *)cur_desc;
2294 }
2295 
2296 /**
2297  * hal_srng_src_num_avail() - Returns number of available entries in src ring
2298  * @hal_soc: Opaque HAL SOC handle
2299  * @hal_ring_hdl: Source ring pointer
2300  * @sync_hw_ptr: Sync cached tail pointer with HW
2301  *
2302  * Return: number of available entries
2303  */
2304 static inline uint32_t
2305 hal_srng_src_num_avail(void *hal_soc,
2306 		       hal_ring_handle_t hal_ring_hdl, int sync_hw_ptr)
2307 {
2308 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2309 	uint32_t tp;
2310 	uint32_t hp = srng->u.src_ring.hp;
2311 
2312 	if (sync_hw_ptr) {
2313 		tp = *(srng->u.src_ring.tp_addr);
2314 		srng->u.src_ring.cached_tp = tp;
2315 	} else {
2316 		tp = srng->u.src_ring.cached_tp;
2317 	}
2318 
2319 	if (tp > hp)
2320 		return ((tp - hp) / srng->entry_size) - 1;
2321 	else
2322 		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
2323 }
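/*
 * Worked example (values assumed for illustration): one entry is always kept
 * unused so that a full ring can be distinguished from an empty one, hence
 * the trailing "- 1". With ring_size = 128 dwords, entry_size = 8 dwords,
 * hp = 32 and tp = 16, the available count is (128 - 32 + 16) / 8 - 1 = 13.
 */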
2324 
2325 #ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
2326 /**
2327  * hal_srng_clear_ring_usage_wm_locked() - Clear SRNG usage watermark stats
2328  * @hal_soc_hdl: HAL soc handle
2329  * @hal_ring_hdl: SRNG handle
2330  *
2331  * This function tries to acquire SRNG lock, and hence should not be called
2332  * from a context which has already acquired the SRNG lock.
2333  *
2334  * Return: None
2335  */
2336 static inline
2337 void hal_srng_clear_ring_usage_wm_locked(hal_soc_handle_t hal_soc_hdl,
2338 					 hal_ring_handle_t hal_ring_hdl)
2339 {
2340 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2341 
2342 	SRNG_LOCK(&srng->lock);
2343 	srng->high_wm.val = 0;
2344 	srng->high_wm.timestamp = 0;
2345 	qdf_mem_zero(&srng->high_wm.bins[0], sizeof(srng->high_wm.bins[0]) *
2346 					     HAL_SRNG_HIGH_WM_BIN_MAX);
2347 	SRNG_UNLOCK(&srng->lock);
2348 }
2349 
2350 /**
2351  * hal_srng_update_ring_usage_wm_no_lock() - Update the SRNG usage wm stats
2352  * @hal_soc_hdl: HAL soc handle
2353  * @hal_ring_hdl: SRNG handle
2354  *
2355  * This function should be called with the SRNG lock held.
2356  *
2357  * Return: None
2358  */
2359 static inline
2360 void hal_srng_update_ring_usage_wm_no_lock(hal_soc_handle_t hal_soc_hdl,
2361 					   hal_ring_handle_t hal_ring_hdl)
2362 {
2363 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2364 	uint32_t curr_wm_val = 0;
2365 
2366 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
2367 		curr_wm_val = hal_srng_src_num_avail(hal_soc_hdl, hal_ring_hdl,
2368 						     0);
2369 	else
2370 		curr_wm_val = hal_srng_dst_num_valid(hal_soc_hdl, hal_ring_hdl,
2371 						     0);
2372 
2373 	if (curr_wm_val > srng->high_wm.val) {
2374 		srng->high_wm.val = curr_wm_val;
2375 		srng->high_wm.timestamp = qdf_get_system_timestamp();
2376 	}
2377 
2378 	if (curr_wm_val >=
2379 		srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_90_to_100])
2380 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_90_to_100]++;
2381 	else if (curr_wm_val >=
2382 		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_80_to_90])
2383 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_80_to_90]++;
2384 	else if (curr_wm_val >=
2385 		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_70_to_80])
2386 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_70_to_80]++;
2387 	else if (curr_wm_val >=
2388 		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_60_to_70])
2389 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_60_to_70]++;
2390 	else if (curr_wm_val >=
2391 		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_50_to_60])
2392 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_50_to_60]++;
2393 	else
2394 		srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_BELOW_50_PERCENT]++;
2395 }
2396 
2397 static inline
2398 int hal_dump_srng_high_wm_stats(hal_soc_handle_t hal_soc_hdl,
2399 				hal_ring_handle_t hal_ring_hdl,
2400 				char *buf, int buf_len, int pos)
2401 {
2402 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2403 
2404 	return qdf_scnprintf(buf + pos, buf_len - pos,
2405 			     "%8u %7u %12llu %10u %10u %10u %10u %10u %10u",
2406 			     srng->ring_id, srng->high_wm.val,
2407 			     srng->high_wm.timestamp,
2408 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_BELOW_50_PERCENT],
2409 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_50_to_60],
2410 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_60_to_70],
2411 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_70_to_80],
2412 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_80_to_90],
2413 			     srng->high_wm.bins[HAL_SRNG_HIGH_WM_BIN_90_to_100]);
2414 }
2415 #else
2416 /**
2417  * hal_srng_clear_ring_usage_wm_locked() - Clear SRNG usage watermark stats
2418  * @hal_soc_hdl: HAL soc handle
2419  * @hal_ring_hdl: SRNG handle
2420  *
2421  * This function tries to acquire SRNG lock, and hence should not be called
2422  * from a context which has already acquired the SRNG lock.
2423  *
2424  * Return: None
2425  */
2426 static inline
2427 void hal_srng_clear_ring_usage_wm_locked(hal_soc_handle_t hal_soc_hdl,
2428 					 hal_ring_handle_t hal_ring_hdl)
2429 {
2430 }
2431 
2432 /**
2433  * hal_srng_update_ring_usage_wm_no_lock() - Update the SRNG usage wm stats
2434  * @hal_soc_hdl: HAL soc handle
2435  * @hal_ring_hdl: SRNG handle
2436  *
2437  * This function should be called with the SRNG lock held.
2438  *
2439  * Return: None
2440  */
2441 static inline
2442 void hal_srng_update_ring_usage_wm_no_lock(hal_soc_handle_t hal_soc_hdl,
2443 					   hal_ring_handle_t hal_ring_hdl)
2444 {
2445 }
2446 
2447 static inline
2448 int hal_dump_srng_high_wm_stats(hal_soc_handle_t hal_soc_hdl,
2449 				hal_ring_handle_t hal_ring_hdl,
2450 				char *buf, int buf_len, int pos)
2451 {
2452 	return 0;
2453 }
2454 #endif
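/*
 * Usage sketch for the watermark helpers above (hedged; the surrounding
 * datapath and stats contexts are assumed, only the HAL calls come from this
 * header):
 *
 *	// in the ring-processing path, with the SRNG lock already held
 *	hal_srng_update_ring_usage_wm_no_lock(hal_soc_hdl, hal_ring_hdl);
 *
 *	// in a stats/debug context, without the SRNG lock held
 *	pos += hal_dump_srng_high_wm_stats(hal_soc_hdl, hal_ring_hdl,
 *					   buf, buf_len, pos);
 *	hal_srng_clear_ring_usage_wm_locked(hal_soc_hdl, hal_ring_hdl);
 */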
2455 
2456 /**
2457  * hal_srng_access_end_unlocked() - End ring access (unlocked), update cached
2458  *                                  ring head/tail pointers to HW.
2459  * @hal_soc: Opaque HAL SOC handle
2460  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2461  *
2462  * The target expects cached head/tail pointer to be updated to the
2463  * shared location in little-endian order; this API ensures that.
2464  * This API should be used only if hal_srng_access_start_unlocked was used to
2465  * start ring access
2466  *
2467  * Return: None
2468  */
2469 static inline void
2470 hal_srng_access_end_unlocked(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2471 {
2472 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2473 
2474 	/* TODO: See if we need a write memory barrier here */
2475 	if (srng->flags & HAL_SRNG_LMAC_RING) {
2476 		/* For LMAC rings, ring pointer updates are done through FW and
2477 		 * hence written to a shared memory location that is read by FW
2478 		 */
2479 		if (srng->ring_dir == HAL_SRNG_SRC_RING) {
2480 			*srng->u.src_ring.hp_addr =
2481 				qdf_cpu_to_le32(srng->u.src_ring.hp);
2482 		} else {
2483 			*srng->u.dst_ring.tp_addr =
2484 				qdf_cpu_to_le32(srng->u.dst_ring.tp);
2485 		}
2486 	} else {
2487 		if (srng->ring_dir == HAL_SRNG_SRC_RING)
2488 			hal_srng_write_address_32_mb(hal_soc,
2489 						     srng,
2490 						     srng->u.src_ring.hp_addr,
2491 						     srng->u.src_ring.hp);
2492 		else
2493 			hal_srng_write_address_32_mb(hal_soc,
2494 						     srng,
2495 						     srng->u.dst_ring.tp_addr,
2496 						     srng->u.dst_ring.tp);
2497 	}
2498 }
2499 
2500 /* hal_srng_access_end_unlocked already handles endianness conversion,
2501  * use the same.
2502  */
2503 #define hal_le_srng_access_end_unlocked_in_cpu_order \
2504 	hal_srng_access_end_unlocked
2505 
2506 /**
2507  * hal_srng_access_end() - Unlock ring access and update cached ring head/tail
2508  *                         pointers to HW
2509  * @hal_soc: Opaque HAL SOC handle
2510  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2511  *
2512  * The target expects cached head/tail pointer to be updated to the
2513  * shared location in little-endian order; this API ensures that.
2514  * This API should be used only if hal_srng_access_start was used to
2515  * start ring access
2516  *
2517  */
2518 static inline void
2519 hal_srng_access_end(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2520 {
2521 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2522 
2523 	if (qdf_unlikely(!hal_ring_hdl)) {
2524 		qdf_print("Error: Invalid hal_ring\n");
2525 		return;
2526 	}
2527 
2528 	hal_srng_access_end_unlocked(hal_soc, hal_ring_hdl);
2529 	SRNG_UNLOCK(&(srng->lock));
2530 }
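/*
 * Usage sketch of the locked access sequence (hedged; process_entry() is a
 * hypothetical caller-side helper, and hal_soc_hdl/hal_soc refer to the same
 * HAL SOC in its opaque and void * forms):
 *
 *	if (hal_srng_access_start(hal_soc_hdl, hal_ring_hdl))
 *		return;
 *	while ((desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl)))
 *		process_entry(desc);
 *	hal_srng_access_end(hal_soc, hal_ring_hdl);
 */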
2531 
2532 #ifdef FEATURE_RUNTIME_PM
2533 #define hal_srng_access_end_v1 hal_srng_rtpm_access_end
2534 
2535 /**
2536  * hal_srng_rtpm_access_end() - RTPM-aware unlock of ring access
2537  * @hal_soc_hdl: Opaque HAL SOC handle
2538  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2539  * @rtpm_id: RTPM debug id
2540  *
2541  * Function updates the HP/TP value to the hardware register.
2542  * The target expects cached head/tail pointer to be updated to the
2543  * shared location in little-endian order; this API ensures that.
2544  * This API should be used only if hal_srng_access_start was used to
2545  * start ring access
2546  *
2547  * Return: None
2548  */
2549 void
2550 hal_srng_rtpm_access_end(hal_soc_handle_t hal_soc_hdl,
2551 			 hal_ring_handle_t hal_ring_hdl,
2552 			 uint32_t rtpm_id);
2553 #else
2554 #define hal_srng_access_end_v1(hal_soc_hdl, hal_ring_hdl, rtpm_id) \
2555 	hal_srng_access_end(hal_soc_hdl, hal_ring_hdl)
2556 #endif
2557 
2558 /* hal_srng_access_end already handles endianness conversion, so use the same */
2559 #define hal_le_srng_access_end_in_cpu_order \
2560 	hal_srng_access_end
2561 
2562 /**
2563  * hal_srng_access_end_reap() - Unlock ring access
2564  * @hal_soc: Opaque HAL SOC handle
2565  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2566  *
2567  * This should be used only if hal_srng_access_start was used to start ring
2568  * access, and only while reaping SRC ring completions
2569  *
2570  * Return: None
2571  */
2572 static inline void
2573 hal_srng_access_end_reap(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2574 {
2575 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2576 
2577 	SRNG_UNLOCK(&(srng->lock));
2578 }
2579 
2580 /* TODO: Check if the following definitions is available in HW headers */
2581 #define WBM_IDLE_SCATTER_BUF_SIZE 32704
2582 #define NUM_MPDUS_PER_LINK_DESC 6
2583 #define NUM_MSDUS_PER_LINK_DESC 7
2584 #define REO_QUEUE_DESC_ALIGN 128
2585 
2586 #define LINK_DESC_ALIGN 128
2587 
2588 #define ADDRESS_MATCH_TAG_VAL 0x5
2589 /* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
2590  * case of TX_MPDU_QUEUE_EXT. We are defining a common average count here
2591  */
2592 #define NUM_MPDU_LINKS_PER_QUEUE_DESC 12
2593 
2594 /* TODO: Check with HW team on the scatter buffer size supported. As per WBM
2595  * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
2596  * should be specified in 16 word units. But the number of bits defined for
2597  * this field in HW header files is 5.
2598  */
2599 #define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8
2600 
2601 
2602 /**
2603  * hal_idle_list_scatter_buf_size() - Get the size of each scatter buffer
2604  *                                    in an idle list
2605  * @hal_soc_hdl: Opaque HAL SOC handle
2606  *
2607  * Return: scatter buffer size
2608  */
2609 static inline
2610 uint32_t hal_idle_list_scatter_buf_size(hal_soc_handle_t hal_soc_hdl)
2611 {
2612 	return WBM_IDLE_SCATTER_BUF_SIZE;
2613 }
2614 
2615 /**
2616  * hal_get_link_desc_size() - Get the size of each link descriptor
2617  * @hal_soc_hdl: Opaque HAL SOC handle
2618  *
2619  * Return: link descriptor size
2620  */
2621 static inline uint32_t hal_get_link_desc_size(hal_soc_handle_t hal_soc_hdl)
2622 {
2623 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2624 
2625 	if (!hal_soc || !hal_soc->ops) {
2626 		qdf_print("Error: Invalid ops\n");
2627 		QDF_BUG(0);
2628 		return -EINVAL;
2629 	}
2630 	if (!hal_soc->ops->hal_get_link_desc_size) {
2631 		qdf_print("Error: Invalid function pointer\n");
2632 		QDF_BUG(0);
2633 		return -EINVAL;
2634 	}
2635 	return hal_soc->ops->hal_get_link_desc_size();
2636 }
2637 
2638 /**
2639  * hal_get_link_desc_align() - Get the required start address alignment for
2640  *                             link descriptors
2641  * @hal_soc_hdl: Opaque HAL SOC handle
2642  *
2643  * Return: the required alignment
2644  */
2645 static inline
2646 uint32_t hal_get_link_desc_align(hal_soc_handle_t hal_soc_hdl)
2647 {
2648 	return LINK_DESC_ALIGN;
2649 }
2650 
2651 /**
2652  * hal_num_mpdus_per_link_desc() - Get number of mpdus each link desc can hold
2653  * @hal_soc_hdl: Opaque HAL SOC handle
2654  *
2655  * Return: number of MPDUs
2656  */
2657 static inline
2658 uint32_t hal_num_mpdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
2659 {
2660 	return NUM_MPDUS_PER_LINK_DESC;
2661 }
2662 
2663 /**
2664  * hal_num_msdus_per_link_desc() - Get number of msdus each link desc can hold
2665  * @hal_soc_hdl: Opaque HAL SOC handle
2666  *
2667  * Return: number of MSDUs
2668  */
2669 static inline
2670 uint32_t hal_num_msdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
2671 {
2672 	return NUM_MSDUS_PER_LINK_DESC;
2673 }
2674 
2675 /**
2676  * hal_num_mpdu_links_per_queue_desc() - Get number of mpdu links each queue
2677  *                                       descriptor can hold
2678  * @hal_soc_hdl: Opaque HAL SOC handle
2679  *
2680  * Return: number of links per queue descriptor
2681  */
2682 static inline
2683 uint32_t hal_num_mpdu_links_per_queue_desc(hal_soc_handle_t hal_soc_hdl)
2684 {
2685 	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
2686 }
2687 
2688 /**
2689  * hal_idle_scatter_buf_num_entries() - Get the number of link desc entries
2690  *                                      that fit in the given buffer size
2691  * @hal_soc_hdl: Opaque HAL SOC handle
2692  * @scatter_buf_size: Size of scatter buffer
2693  *
2694  * Return: number of entries
2695  */
2696 static inline
2697 uint32_t hal_idle_scatter_buf_num_entries(hal_soc_handle_t hal_soc_hdl,
2698 					  uint32_t scatter_buf_size)
2699 {
2700 	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
2701 		hal_srng_get_entrysize(hal_soc_hdl, WBM_IDLE_LINK);
2702 }
2703 
2704 /**
2705  * hal_idle_list_num_scatter_bufs() - Get the number of scatter buffers
2706  *                                    needed for the given total memory size
2707  * @hal_soc_hdl: Opaque HAL SOC handle
2708  * @total_mem: size of memory to be scattered
2709  * @scatter_buf_size: Size of scatter buffer
2710  *
2711  * Return: number of idle list scatter buffers
2712  */
2713 static inline
2714 uint32_t hal_idle_list_num_scatter_bufs(hal_soc_handle_t hal_soc_hdl,
2715 					uint32_t total_mem,
2716 					uint32_t scatter_buf_size)
2717 {
2718 	uint8_t rem = (total_mem % (scatter_buf_size -
2719 			WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;
2720 
2721 	uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
2722 				WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;
2723 
2724 	return num_scatter_bufs;
2725 }
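/*
 * Worked example (numbers assumed for illustration): with total_mem of
 * 1000000 bytes and scatter_buf_size = WBM_IDLE_SCATTER_BUF_SIZE (32704),
 * each buffer carries 32704 - 8 = 32696 usable bytes, so
 * 1000000 / 32696 = 30 full buffers plus a non-zero remainder, giving
 * 31 scatter buffers in total.
 */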
2726 
2727 enum hal_pn_type {
2728 	HAL_PN_NONE,
2729 	HAL_PN_WPA,
2730 	HAL_PN_WAPI_EVEN,
2731 	HAL_PN_WAPI_UNEVEN,
2732 };
2733 
2734 #define HAL_RX_BA_WINDOW_256 256
2735 #define HAL_RX_BA_WINDOW_1024 1024
2736 
2737 /**
2738  * hal_get_reo_qdesc_align() - Get start address alignment for reo
2739  *                             queue descriptors
2740  * @hal_soc_hdl: Opaque HAL SOC handle
2741  *
2742  * Return: required start address alignment
2743  */
2744 static inline
2745 uint32_t hal_get_reo_qdesc_align(hal_soc_handle_t hal_soc_hdl)
2746 {
2747 	return REO_QUEUE_DESC_ALIGN;
2748 }
2749 
2750 /**
2751  * hal_srng_get_hp_addr() - Get head pointer physical address
2752  * @hal_soc: Opaque HAL SOC handle
2753  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2754  *
2755  * Return: head pointer physical address
2756  */
2757 static inline qdf_dma_addr_t
2758 hal_srng_get_hp_addr(void *hal_soc,
2759 		     hal_ring_handle_t hal_ring_hdl)
2760 {
2761 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2762 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
2763 
2764 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
2765 		if (srng->flags & HAL_SRNG_LMAC_RING)
2766 			return hal->shadow_wrptr_mem_paddr +
2767 				 ((unsigned long)(srng->u.src_ring.hp_addr) -
2768 				  (unsigned long)(hal->shadow_wrptr_mem_vaddr));
2769 		else if (ignore_shadow)
2770 			return (qdf_dma_addr_t)srng->u.src_ring.hp_addr;
2771 		else
2772 			return ((struct hif_softc *)hal->hif_handle)->mem_pa +
2773 				((unsigned long)srng->u.src_ring.hp_addr -
2774 				 (unsigned long)hal->dev_base_addr);
2775 
2776 	} else {
2777 		return hal->shadow_rdptr_mem_paddr +
2778 		  ((unsigned long)(srng->u.dst_ring.hp_addr) -
2779 		   (unsigned long)(hal->shadow_rdptr_mem_vaddr));
2780 	}
2781 }
2782 
2783 /**
2784  * hal_srng_get_tp_addr() - Get tail pointer physical address
2785  * @hal_soc: Opaque HAL SOC handle
2786  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2787  *
2788  * Return: tail pointer physical address
2789  */
2790 static inline qdf_dma_addr_t
2791 hal_srng_get_tp_addr(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2792 {
2793 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2794 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
2795 
2796 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
2797 		return hal->shadow_rdptr_mem_paddr +
2798 			((unsigned long)(srng->u.src_ring.tp_addr) -
2799 			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
2800 	} else {
2801 		if (srng->flags & HAL_SRNG_LMAC_RING)
2802 			return hal->shadow_wrptr_mem_paddr +
2803 				((unsigned long)(srng->u.dst_ring.tp_addr) -
2804 				 (unsigned long)(hal->shadow_wrptr_mem_vaddr));
2805 		else if (ignore_shadow)
2806 			return (qdf_dma_addr_t)srng->u.dst_ring.tp_addr;
2807 		else
2808 			return ((struct hif_softc *)hal->hif_handle)->mem_pa +
2809 				((unsigned long)srng->u.dst_ring.tp_addr -
2810 				 (unsigned long)hal->dev_base_addr);
2811 	}
2812 }
2813 
2814 /**
2815  * hal_srng_get_num_entries() - Get total entries in the HAL Srng
2816  * @hal_soc_hdl: Opaque HAL SOC handle
2817  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2818  *
2819  * Return: total number of entries in hal ring
2820  */
2821 static inline
2822 uint32_t hal_srng_get_num_entries(hal_soc_handle_t hal_soc_hdl,
2823 				  hal_ring_handle_t hal_ring_hdl)
2824 {
2825 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2826 
2827 	return srng->num_entries;
2828 }
2829 
2830 /**
2831  * hal_get_srng_params() - Retrieve SRNG parameters for a given ring from HAL
2832  * @hal_soc_hdl: Opaque HAL SOC handle
2833  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2834  * @ring_params: SRNG parameters will be returned through this structure
2835  */
2836 void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
2837 			 hal_ring_handle_t hal_ring_hdl,
2838 			 struct hal_srng_params *ring_params);
2839 
2840 /**
2841  * hal_get_meminfo() - Retrieve hal memory base address
2842  * @hal_soc_hdl: Opaque HAL SOC handle
2843  * @mem: pointer to structure to be updated with hal mem info
2844  */
2845 void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem);
2846 
2847 /**
2848  * hal_get_target_type() - Return target type
2849  * @hal_soc_hdl: Opaque HAL SOC handle
2850  *
2851  * Return: target type
2852  */
2853 uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl);
2854 
2855 /**
2856  * hal_srng_dst_hw_init() - Private function to initialize SRNG
2857  *                          destination ring HW
2858  * @hal: HAL SOC handle
2859  * @srng: SRNG ring pointer
2860  * @idle_check: Check if ring is idle
2861  * @idx: Ring index
2862  */
2863 static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
2864 					struct hal_srng *srng, bool idle_check,
2865 					uint16_t idx)
2866 {
2867 	hal->ops->hal_srng_dst_hw_init(hal, srng, idle_check, idx);
2868 }
2869 
2870 /**
2871  * hal_srng_src_hw_init() - Private function to initialize SRNG
2872  *                          source ring HW
2873  * @hal: HAL SOC handle
2874  * @srng: SRNG ring pointer
2875  * @idle_check: Check if ring is idle
2876  * @idx: Ring index
2877  */
2878 static inline void hal_srng_src_hw_init(struct hal_soc *hal,
2879 					struct hal_srng *srng, bool idle_check,
2880 					uint16_t idx)
2881 {
2882 	hal->ops->hal_srng_src_hw_init(hal, srng, idle_check, idx);
2883 }
2884 
2885 /**
2886  * hal_srng_hw_disable() - Private function to disable SRNG
2887  *                         source ring HW
2888  * @hal_soc: HAL SOC handle
2889  * @srng: SRNG ring pointer
2890  */
2891 static inline
2892 void hal_srng_hw_disable(struct hal_soc *hal_soc, struct hal_srng *srng)
2893 {
2894 	if (hal_soc->ops->hal_srng_hw_disable)
2895 		hal_soc->ops->hal_srng_hw_disable(hal_soc, srng);
2896 }
2897 
2898 /**
2899  * hal_get_hw_hptp() - Get HW head and tail pointer value for any ring
2900  * @hal_soc_hdl: Opaque HAL SOC handle
2901  * @hal_ring_hdl: Source ring pointer
2902  * @headp: Head Pointer
2903  * @tailp: Tail Pointer
2904  * @ring_type: Ring type
2905  *
2906  * Return: None; head and tail pointer values are returned in the arguments.
2907  */
2908 static inline
2909 void hal_get_hw_hptp(hal_soc_handle_t hal_soc_hdl,
2910 		     hal_ring_handle_t hal_ring_hdl,
2911 		     uint32_t *headp, uint32_t *tailp,
2912 		     uint8_t ring_type)
2913 {
2914 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2915 
2916 	hal_soc->ops->hal_get_hw_hptp(hal_soc, hal_ring_hdl,
2917 			headp, tailp, ring_type);
2918 }
2919 
2920 /**
2921  * hal_reo_setup() - Initialize HW REO block
2922  * @hal_soc_hdl: Opaque HAL SOC handle
2923  * @reoparams: parameters needed by HAL for REO config
2924  * @qref_reset: reset qref
2925  */
2926 static inline void hal_reo_setup(hal_soc_handle_t hal_soc_hdl,
2927 				 void *reoparams, int qref_reset)
2928 {
2929 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2930 
2931 	hal_soc->ops->hal_reo_setup(hal_soc, reoparams, qref_reset);
2932 }
2933 
2934 static inline
2935 void hal_compute_reo_remap_ix2_ix3(hal_soc_handle_t hal_soc_hdl,
2936 				   uint32_t *ring, uint32_t num_rings,
2937 				   uint32_t *remap1, uint32_t *remap2)
2938 {
2939 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2940 
2941 	return hal_soc->ops->hal_compute_reo_remap_ix2_ix3(ring,
2942 					num_rings, remap1, remap2);
2943 }
2944 
2945 static inline
2946 void hal_compute_reo_remap_ix0(hal_soc_handle_t hal_soc_hdl, uint32_t *remap0)
2947 {
2948 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2949 
2950 	if (hal_soc->ops->hal_compute_reo_remap_ix0)
2951 		hal_soc->ops->hal_compute_reo_remap_ix0(remap0);
2952 }
2953 
2954 /**
2955  * hal_setup_link_idle_list() - Setup scattered idle list using the
2956  *                              buffer list provided
2957  * @hal_soc_hdl: Opaque HAL SOC handle
2958  * @scatter_bufs_base_paddr: Array of physical base addresses
2959  * @scatter_bufs_base_vaddr: Array of virtual base addresses
2960  * @num_scatter_bufs: Number of scatter buffers in the above lists
2961  * @scatter_buf_size: Size of each scatter buffer
2962  * @last_buf_end_offset: Offset to the last entry
2963  * @num_entries: Total entries of all scatter bufs
2964  *
2965  */
2966 static inline
2967 void hal_setup_link_idle_list(hal_soc_handle_t hal_soc_hdl,
2968 			      qdf_dma_addr_t scatter_bufs_base_paddr[],
2969 			      void *scatter_bufs_base_vaddr[],
2970 			      uint32_t num_scatter_bufs,
2971 			      uint32_t scatter_buf_size,
2972 			      uint32_t last_buf_end_offset,
2973 			      uint32_t num_entries)
2974 {
2975 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2976 
2977 	hal_soc->ops->hal_setup_link_idle_list(hal_soc, scatter_bufs_base_paddr,
2978 			scatter_bufs_base_vaddr, num_scatter_bufs,
2979 			scatter_buf_size, last_buf_end_offset,
2980 			num_entries);
2981 
2982 }
2983 
2984 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
2985 /**
2986  * hal_dump_rx_reo_queue_desc() - Dump reo queue descriptor fields
2987  * @hw_qdesc_vaddr_aligned: Pointer to hw reo queue desc virtual addr
2988  *
2989  * Use the virtual addr pointer to reo h/w queue desc to read
2990  * the values from ddr and log them.
2991  *
2992  * Return: none
2993  */
2994 static inline void hal_dump_rx_reo_queue_desc(
2995 	void *hw_qdesc_vaddr_aligned)
2996 {
2997 	struct rx_reo_queue *hw_qdesc =
2998 		(struct rx_reo_queue *)hw_qdesc_vaddr_aligned;
2999 
3000 	if (!hw_qdesc)
3001 		return;
3002 
3003 	hal_info("receive_queue_number %u vld %u window_jump_2k %u"
3004 		 " hole_count %u ba_window_size %u ignore_ampdu_flag %u"
3005 		 " svld %u ssn %u current_index %u"
3006 		 " disable_duplicate_detection %u soft_reorder_enable %u"
3007 		 " chk_2k_mode %u oor_mode %u mpdu_frames_processed_count %u"
3008 		 " msdu_frames_processed_count %u total_processed_byte_count %u"
3009 		 " late_receive_mpdu_count %u seq_2k_error_detected_flag %u"
3010 		 " pn_error_detected_flag %u current_mpdu_count %u"
3011 		 " current_msdu_count %u timeout_count %u"
3012 		 " forward_due_to_bar_count %u duplicate_count %u"
3013 		 " frames_in_order_count %u bar_received_count %u"
3014 		 " pn_check_needed %u pn_shall_be_even %u"
3015 		 " pn_shall_be_uneven %u pn_size %u",
3016 		 hw_qdesc->receive_queue_number,
3017 		 hw_qdesc->vld,
3018 		 hw_qdesc->window_jump_2k,
3019 		 hw_qdesc->hole_count,
3020 		 hw_qdesc->ba_window_size,
3021 		 hw_qdesc->ignore_ampdu_flag,
3022 		 hw_qdesc->svld,
3023 		 hw_qdesc->ssn,
3024 		 hw_qdesc->current_index,
3025 		 hw_qdesc->disable_duplicate_detection,
3026 		 hw_qdesc->soft_reorder_enable,
3027 		 hw_qdesc->chk_2k_mode,
3028 		 hw_qdesc->oor_mode,
3029 		 hw_qdesc->mpdu_frames_processed_count,
3030 		 hw_qdesc->msdu_frames_processed_count,
3031 		 hw_qdesc->total_processed_byte_count,
3032 		 hw_qdesc->late_receive_mpdu_count,
3033 		 hw_qdesc->seq_2k_error_detected_flag,
3034 		 hw_qdesc->pn_error_detected_flag,
3035 		 hw_qdesc->current_mpdu_count,
3036 		 hw_qdesc->current_msdu_count,
3037 		 hw_qdesc->timeout_count,
3038 		 hw_qdesc->forward_due_to_bar_count,
3039 		 hw_qdesc->duplicate_count,
3040 		 hw_qdesc->frames_in_order_count,
3041 		 hw_qdesc->bar_received_count,
3042 		 hw_qdesc->pn_check_needed,
3043 		 hw_qdesc->pn_shall_be_even,
3044 		 hw_qdesc->pn_shall_be_uneven,
3045 		 hw_qdesc->pn_size);
3046 }
3047 
3048 #else /* DUMP_REO_QUEUE_INFO_IN_DDR */
3049 
3050 static inline void hal_dump_rx_reo_queue_desc(
3051 	void *hw_qdesc_vaddr_aligned)
3052 {
3053 }
3054 #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
3055 
3056 /**
3057  * hal_srng_dump_ring_desc() - Dump ring descriptor info
3058  * @hal_soc_hdl: Opaque HAL SOC handle
3059  * @hal_ring_hdl: Source ring pointer
3060  * @ring_desc: Opaque ring descriptor handle
3061  */
3062 static inline void hal_srng_dump_ring_desc(hal_soc_handle_t hal_soc_hdl,
3063 					   hal_ring_handle_t hal_ring_hdl,
3064 					   hal_ring_desc_t ring_desc)
3065 {
3066 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3067 
3068 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
3069 			   ring_desc, (srng->entry_size << 2));
3070 }
3071 
3072 /**
3073  * hal_srng_dump_ring() - Dump last 128 descs of the ring
3074  * @hal_soc_hdl: Opaque HAL SOC handle
3075  * @hal_ring_hdl: Source ring pointer
3076  */
3077 static inline void hal_srng_dump_ring(hal_soc_handle_t hal_soc_hdl,
3078 				      hal_ring_handle_t hal_ring_hdl)
3079 {
3080 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3081 	uint32_t *desc;
3082 	uint32_t tp, i;
3083 
3084 	tp = srng->u.dst_ring.tp;
3085 
3086 	for (i = 0; i < 128; i++) {
3087 		if (!tp)
3088 			tp = srng->ring_size;
3089 
3090 		desc = &srng->ring_base_vaddr[tp - srng->entry_size];
3091 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
3092 				   QDF_TRACE_LEVEL_DEBUG,
3093 				   desc, (srng->entry_size << 2));
3094 
3095 		tp -= srng->entry_size;
3096 	}
3097 }
3098 
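/*
 * Example (illustrative sketch only, not part of the API): a caller that
 * hits an unexpected entry while reaping a destination ring can dump the
 * offending descriptor and then the last descriptors of the ring; hal_soc,
 * hal_ring_hdl and ring_desc are assumed to come from the caller's context.
 *
 *	if (qdf_unlikely(bad_entry)) {
 *		hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
 *		hal_srng_dump_ring(hal_soc, hal_ring_hdl);
 *	}
 */
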
3099 /**
3100  * hal_rxdma_desc_to_hal_ring_desc() - API to convert rxdma ring desc
3101  *                                     to opaque dp_ring desc type
3102  * @ring_desc: rxdma ring desc
3103  *
3104  * Return: hal_ring_desc_t type
3105  */
3106 static inline
3107 hal_ring_desc_t hal_rxdma_desc_to_hal_ring_desc(hal_rxdma_desc_t ring_desc)
3108 {
3109 	return (hal_ring_desc_t)ring_desc;
3110 }
3111 
3112 /**
3113  * hal_srng_set_event() - Set hal_srng event
3114  * @hal_ring_hdl: Source ring pointer
3115  * @event: SRNG ring event
3116  *
3117  * Return: None
3118  */
3119 static inline void hal_srng_set_event(hal_ring_handle_t hal_ring_hdl, int event)
3120 {
3121 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3122 
3123 	qdf_atomic_set_bit(event, &srng->srng_event);
3124 }
3125 
3126 /**
3127  * hal_srng_clear_event() - Clear hal_srng event
3128  * @hal_ring_hdl: Source ring pointer
3129  * @event: SRNG ring event
3130  *
3131  * Return: None
3132  */
3133 static inline
3134 void hal_srng_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
3135 {
3136 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3137 
3138 	qdf_atomic_clear_bit(event, &srng->srng_event);
3139 }
3140 
3141 /**
3142  * hal_srng_get_clear_event() - Clear srng event and return old value
3143  * @hal_ring_hdl: Source ring pointer
3144  * @event: SRNG ring event
3145  *
3146  * Return: Return old event value
3147  */
3148 static inline
3149 int hal_srng_get_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
3150 {
3151 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3152 
3153 	return qdf_atomic_test_and_clear_bit(event, &srng->srng_event);
3154 }
3155 
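/*
 * Example (illustrative sketch only): typical pattern for the srng event
 * helpers above, assuming an event bit id (for example HAL_SRNG_FLUSH_EVENT
 * from hal_internal.h) agreed between producer and consumer.
 *
 *	hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
 *	...
 *	if (hal_srng_get_clear_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT)) {
 *		// event was pending and is now cleared atomically
 *	}
 */
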
3156 /**
3157  * hal_srng_set_flush_last_ts() - Record last flush time stamp
3158  * @hal_ring_hdl: Source ring pointer
3159  *
3160  * Return: None
3161  */
3162 static inline void hal_srng_set_flush_last_ts(hal_ring_handle_t hal_ring_hdl)
3163 {
3164 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3165 
3166 	srng->last_flush_ts = qdf_get_log_timestamp();
3167 }
3168 
3169 /**
3170  * hal_srng_inc_flush_cnt() - Increment flush counter
3171  * @hal_ring_hdl: Source ring pointer
3172  *
3173  * Return: None
3174  */
3175 static inline void hal_srng_inc_flush_cnt(hal_ring_handle_t hal_ring_hdl)
3176 {
3177 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3178 
3179 	srng->flush_count++;
3180 }
3181 
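/*
 * Example (illustrative sketch only): a caller that flushes a ring would
 * typically pair the two helpers above to record when and how often the
 * flush path ran.
 *
 *	hal_srng_inc_flush_cnt(hal_ring_hdl);
 *	hal_srng_set_flush_last_ts(hal_ring_hdl);
 */
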
3182 /**
3183  * hal_rx_sw_mon_desc_info_get() - Get SW monitor desc info
3184  * @hal: Core HAL soc handle
3185  * @ring_desc: Mon dest ring descriptor
3186  * @desc_info: Desc info to be populated
3187  *
3188  * Return: None
3189  */
3190 static inline void
3191 hal_rx_sw_mon_desc_info_get(struct hal_soc *hal,
3192 			    hal_ring_desc_t ring_desc,
3193 			    hal_rx_mon_desc_info_t desc_info)
3194 {
3195 	return hal->ops->hal_rx_sw_mon_desc_info_get(ring_desc, desc_info);
3196 }
3197 
3198 /**
3199  * hal_reo_set_err_dst_remap() - Set REO error destination ring remap
3200  *				 register value.
3201  *
3202  * @hal_soc_hdl: Opaque HAL soc handle
3203  *
3204  * Return: None
3205  */
3206 static inline void hal_reo_set_err_dst_remap(hal_soc_handle_t hal_soc_hdl)
3207 {
3208 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
3209 
3210 	if (hal_soc->ops->hal_reo_set_err_dst_remap)
3211 		hal_soc->ops->hal_reo_set_err_dst_remap(hal_soc);
3212 }
3213 
3214 /**
3215  * hal_reo_enable_pn_in_dest() - Subscribe for previous PN for 2k-jump or
3216  *			OOR error frames
3217  * @hal_soc_hdl: Opaque HAL soc handle
3218  *
3219  * Return: true if the feature is enabled,
3220  *	false otherwise.
3221  */
3222 static inline uint8_t
3223 hal_reo_enable_pn_in_dest(hal_soc_handle_t hal_soc_hdl)
3224 {
3225 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
3226 
3227 	if (hal_soc->ops->hal_reo_enable_pn_in_dest)
3228 		return hal_soc->ops->hal_reo_enable_pn_in_dest(hal_soc);
3229 
3230 	return 0;
3231 }
3232 
3233 #ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE
3234 
3235 /**
3236  * hal_set_one_target_reg_config() - Populate the target reg
3237  * offset in hal_soc for one non srng related register at the
3238  * given list index
3239  * @hal: hal handle
3240  * @target_reg_offset: target register offset
3241  * @list_index: index in hal list for shadow regs
3242  *
3243  * Return: none
3244  */
3245 void hal_set_one_target_reg_config(struct hal_soc *hal,
3246 				   uint32_t target_reg_offset,
3247 				   int list_index);
3248 
3249 /**
3250  * hal_set_shadow_regs() - Populate register offset for
3251  * registers that need to be populated in list_shadow_reg_config
3252  * in order to be sent to FW. These reg offsets will be mapped
3253  * to shadow registers.
3254  * @hal_soc: hal handle
3255  *
3256  * Return: QDF_STATUS_OK on success
3257  */
3258 QDF_STATUS hal_set_shadow_regs(void *hal_soc);
3259 
3260 /**
3261  * hal_construct_shadow_regs() - initialize the shadow registers
3262  * for non-srng related register configs
3263  * @hal_soc: hal handle
3264  *
3265  * Return: QDF_STATUS_OK on success
3266  */
3267 QDF_STATUS hal_construct_shadow_regs(void *hal_soc);
3268 
3269 #else /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
3270 static inline void hal_set_one_target_reg_config(
3271 	struct hal_soc *hal,
3272 	uint32_t target_reg_offset,
3273 	int list_index)
3274 {
3275 }
3276 
3277 static inline QDF_STATUS hal_set_shadow_regs(void *hal_soc)
3278 {
3279 	return QDF_STATUS_SUCCESS;
3280 }
3281 
3282 static inline QDF_STATUS hal_construct_shadow_regs(void *hal_soc)
3283 {
3284 	return QDF_STATUS_SUCCESS;
3285 }
3286 #endif /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
3287 
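/*
 * Example (illustrative sketch only): expected use of the shadow register
 * helpers above during SOC bring-up; when GENERIC_SHADOW_REGISTER_ACCESS_ENABLE
 * is not defined the stubs simply return QDF_STATUS_SUCCESS.
 *
 *	if (QDF_IS_STATUS_ERROR(hal_set_shadow_regs(hal_soc)) ||
 *	    QDF_IS_STATUS_ERROR(hal_construct_shadow_regs(hal_soc)))
 *		goto attach_fail;	// hypothetical error label in the caller
 */
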
3288 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
3289 /**
3290  * hal_flush_reg_write_work() - flush all writes from register write queue
3291  * @hal_handle: hal_soc pointer
3292  *
3293  * Return: None
3294  */
3295 void hal_flush_reg_write_work(hal_soc_handle_t hal_handle);
3296 
3297 #else
3298 static inline void hal_flush_reg_write_work(hal_soc_handle_t hal_handle) { }
3299 #endif
3300 
3301 /**
3302  * hal_get_ring_usage() - Calculate the ring usage percentage
3303  * @hal_ring_hdl: Ring pointer
3304  * @ring_type: Ring type
3305  * @headp: pointer to head value
3306  * @tailp: pointer to tail value
3307  *
3308  * Calculate the ring usage percentage for src and dest rings
3309  *
3310  * Return: Ring usage percentage
3311  */
3312 static inline
3313 uint32_t hal_get_ring_usage(
3314 	hal_ring_handle_t hal_ring_hdl,
3315 	enum hal_ring_type ring_type, uint32_t *headp, uint32_t *tailp)
3316 {
3317 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3318 	uint32_t num_avail, num_valid = 0;
3319 	uint32_t ring_usage;
3320 
3321 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
3322 		if (*tailp > *headp)
3323 			num_avail =  ((*tailp - *headp) / srng->entry_size) - 1;
3324 		else
3325 			num_avail = ((srng->ring_size - *headp + *tailp) /
3326 				     srng->entry_size) - 1;
3327 		if (ring_type == WBM_IDLE_LINK)
3328 			num_valid = num_avail;
3329 		else
3330 			num_valid = srng->num_entries - num_avail;
3331 	} else {
3332 		if (*headp >= *tailp)
3333 			num_valid = ((*headp - *tailp) / srng->entry_size);
3334 		else
3335 			num_valid = ((srng->ring_size - *tailp + *headp) /
3336 				     srng->entry_size);
3337 	}
3338 	ring_usage = (100 * num_valid) / srng->num_entries;
3339 	return ring_usage;
3340 }
3341 
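/*
 * Worked example for hal_get_ring_usage() (illustrative numbers only): for a
 * destination ring with num_entries = 1024 and entry_size = 8 (32-bit words
 * per entry), *headp = 4096 and *tailp = 1024 give
 * num_valid = (4096 - 1024) / 8 = 384 entries, so the returned usage is
 * (100 * 384) / 1024 = 37 (integer division).
 */
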
3342 /**
3343  * hal_update_ring_util() - API for tracking ring utilization
3344  * @hal_soc: Opaque HAL SOC handle
3345  * @hal_ring_hdl: Source ring pointer
3346  * @ring_type: Ring type
3347  * @ring_utilisation: Ring utilization stats structure
3348  */
3349 static inline
3350 void hal_update_ring_util(void *hal_soc, hal_ring_handle_t hal_ring_hdl,
3351 			  enum hal_ring_type ring_type,
3352 			  struct ring_util_stats *ring_utilisation)
3353 {
3354 	uint32_t tailp, headp, ring_usage;
3355 
3356 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tailp, &headp);
3357 	ring_usage = hal_get_ring_usage(hal_ring_hdl, ring_type, &headp,
3358 					&tailp);
3359 
3360 	if (ring_usage == RING_USAGE_100_PERCENTAGE) {
3361 		ring_utilisation->util[RING_USAGE_100]++;
3362 	} else if (ring_usage > RING_USAGE_90_PERCENTAGE) {
3363 		ring_utilisation->util[RING_USAGE_GREAT_90]++;
3364 	} else if ((ring_usage > RING_USAGE_70_PERCENTAGE) &&
3365 		   (ring_usage <= RING_USAGE_90_PERCENTAGE)) {
3366 		ring_utilisation->util[RING_USAGE_70_TO_90]++;
3367 	} else if ((ring_usage > RING_USAGE_50_PERCENTAGE) &&
3368 		   (ring_usage <= RING_USAGE_70_PERCENTAGE)) {
3369 		ring_utilisation->util[RING_USAGE_50_TO_70]++;
3370 	} else {
3371 		ring_utilisation->util[RING_USAGE_LESS_50]++;
3372 	}
3373 }
3374 
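/*
 * Example (illustrative sketch only): a caller tracking per-ring usage after
 * every reap pass, assuming it owns a struct ring_util_stats instance (the
 * rx_ring_util name below is hypothetical).
 *
 *	hal_update_ring_util(hal_soc, hal_ring_hdl, REO_DST, &rx_ring_util);
 *	...
 *	// rx_ring_util.util[RING_USAGE_GREAT_90] now counts the passes in
 *	// which more than 90% of the entries were in use.
 */
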
3375 /**
3376  * hal_cmem_write() - function for CMEM buffer writing
3377  * @hal_soc_hdl: HAL SOC handle
3378  * @offset: CMEM address
3379  * @value: value to write
3380  *
3381  * Return: None.
3382  */
3383 static inline void
3384 hal_cmem_write(hal_soc_handle_t hal_soc_hdl, uint32_t offset,
3385 	       uint32_t value)
3386 {
3387 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
3388 
3389 	if (hal_soc->ops->hal_cmem_write)
3390 		hal_soc->ops->hal_cmem_write(hal_soc_hdl, offset, value);
3391 
3392 	return;
3393 }
3394 
3395 static inline bool
3396 hal_dmac_cmn_src_rxbuf_ring_get(hal_soc_handle_t hal_soc_hdl)
3397 {
3398 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
3399 
3400 	return hal_soc->dmac_cmn_src_rxbuf_ring;
3401 }
3402 
3403 /**
3404  * hal_srng_dst_prefetch() - function to prefetch 4 destination ring descs
3405  * @hal_soc_hdl: HAL SOC handle
3406  * @hal_ring_hdl: Destination ring pointer
3407  * @num_valid: valid entries in the ring
3408  *
3409  * Return: last prefetched destination ring descriptor
3410  */
3411 static inline
3412 void *hal_srng_dst_prefetch(hal_soc_handle_t hal_soc_hdl,
3413 			    hal_ring_handle_t hal_ring_hdl,
3414 			    uint16_t num_valid)
3415 {
3416 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3417 	uint8_t *desc;
3418 	uint32_t cnt;
3419 	/*
3420 	 * Prefetching 4 HW descriptors ensures that, by the time the
3421 	 * 5th HW descriptor is being processed, the 5th HW descriptor,
3422 	 * its SW descriptor, its nbuf and the nbuf's data are already
3423 	 * in the cache, i.e. all four items (HW desc, SW desc, nbuf and
3424 	 * nbuf->data) have been prefetched.
3425 	 */
3426 	uint32_t max_prefetch = 4;
3427 
3428 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
3429 		return NULL;
3430 
3431 	desc = (uint8_t *)&srng->ring_base_vaddr[srng->u.dst_ring.tp];
3432 
3433 	if (num_valid < max_prefetch)
3434 		max_prefetch = num_valid;
3435 
3436 	for (cnt = 0; cnt < max_prefetch; cnt++) {
3437 		desc += srng->entry_size * sizeof(uint32_t);
3438 		if (desc == ((uint8_t *)srng->ring_vaddr_end))
3439 			desc = (uint8_t *)&srng->ring_base_vaddr[0];
3440 
3441 		qdf_prefetch(desc);
3442 	}
3443 	return (void *)desc;
3444 }
3445 
3446 /**
3447  * hal_srng_dst_prefetch_next_cached_desc() - function to prefetch next desc
3448  * @hal_soc_hdl: HAL SOC handle
3449  * @hal_ring_hdl: Destination ring pointer
3450  * @last_prefetched_hw_desc: last prefetched HW descriptor
3451  *
3452  * Return: next prefetched destination descriptor
3453  */
3454 static inline
3455 void *hal_srng_dst_prefetch_next_cached_desc(hal_soc_handle_t hal_soc_hdl,
3456 					     hal_ring_handle_t hal_ring_hdl,
3457 					     uint8_t *last_prefetched_hw_desc)
3458 {
3459 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3460 
3461 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
3462 		return NULL;
3463 
3464 	last_prefetched_hw_desc += srng->entry_size * sizeof(uint32_t);
3465 	if (last_prefetched_hw_desc == ((uint8_t *)srng->ring_vaddr_end))
3466 		last_prefetched_hw_desc = (uint8_t *)&srng->ring_base_vaddr[0];
3467 
3468 	qdf_prefetch(last_prefetched_hw_desc);
3469 	return (void *)last_prefetched_hw_desc;
3470 }
3471 
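/*
 * Example (illustrative sketch only): how the two prefetch helpers above are
 * typically paired in a destination-ring reap loop, assuming the usual
 * hal_srng_dst_get_next() accessor from earlier in this file and a
 * hypothetical process_desc() consumer.
 *
 *	last_pf = hal_srng_dst_prefetch(hal_soc, hal_ring_hdl, num_valid);
 *	while ((ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl))) {
 *		process_desc(ring_desc);
 *		last_pf = hal_srng_dst_prefetch_next_cached_desc(hal_soc,
 *								 hal_ring_hdl,
 *								 last_pf);
 *	}
 */
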
3472 /**
3473  * hal_srng_dst_prefetch_32_byte_desc() - function to prefetch a desc at
3474  *					  64 byte offset
3475  * @hal_soc_hdl: HAL SOC handle
3476  * @hal_ring_hdl: Destination ring pointer
3477  * @num_valid: valid entries in the ring
3478  *
3479  * Return: last prefetched destination ring descriptor
3480  */
3481 static inline
3482 void *hal_srng_dst_prefetch_32_byte_desc(hal_soc_handle_t hal_soc_hdl,
3483 					 hal_ring_handle_t hal_ring_hdl,
3484 					 uint16_t num_valid)
3485 {
3486 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3487 	uint8_t *desc;
3488 
3489 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
3490 		return NULL;
3491 
3492 	desc = (uint8_t *)&srng->ring_base_vaddr[srng->u.dst_ring.tp];
3493 
3494 	if ((uintptr_t)desc & 0x3f)
3495 		desc += srng->entry_size * sizeof(uint32_t);
3496 	else
3497 		desc += (srng->entry_size * sizeof(uint32_t)) * 2;
3498 
3499 	if (desc == ((uint8_t *)srng->ring_vaddr_end))
3500 		desc = (uint8_t *)&srng->ring_base_vaddr[0];
3501 
3502 	qdf_prefetch(desc);
3503 
3504 	return (void *)(desc + srng->entry_size * sizeof(uint32_t));
3505 }
3506 
3507 /**
3508  * hal_srng_dst_get_next_32_byte_desc() - function to get next prefetched desc
3509  * @hal_soc_hdl: HAL SOC handle
3510  * @hal_ring_hdl: Destination ring pointer
3511  * @last_prefetched_hw_desc: last prefetched HW descriptor
3512  *
3513  * Return: next prefetched destination descriptor
3514  */
3515 static inline
3516 void *hal_srng_dst_get_next_32_byte_desc(hal_soc_handle_t hal_soc_hdl,
3517 					 hal_ring_handle_t hal_ring_hdl,
3518 					 uint8_t *last_prefetched_hw_desc)
3519 {
3520 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3521 
3522 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
3523 		return NULL;
3524 
3525 	last_prefetched_hw_desc += srng->entry_size * sizeof(uint32_t);
3526 	if (last_prefetched_hw_desc == ((uint8_t *)srng->ring_vaddr_end))
3527 		last_prefetched_hw_desc = (uint8_t *)&srng->ring_base_vaddr[0];
3528 
3529 	return (void *)last_prefetched_hw_desc;
3530 }
3531 
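/*
 * Note on the 32 byte variants above (illustrative reasoning only): with the
 * 8-word (32 byte) entries these helpers are named for, two consecutive
 * entries share one 64 byte cache line. hal_srng_dst_prefetch_32_byte_desc()
 * therefore advances by one entry when the tail descriptor sits in the second
 * half of a line and by two entries otherwise, so each qdf_prefetch() pulls
 * in the next full cache line instead of one that is already resident.
 */
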
3532 /**
3533  * hal_srng_src_set_hp() - set head idx.
3534  * @hal_ring_hdl: srng handle
3535  * @idx: head idx
3536  *
3537  * Return: none
3538  */
3539 static inline
3540 void hal_srng_src_set_hp(hal_ring_handle_t hal_ring_hdl, uint16_t idx)
3541 {
3542 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3543 
3544 	srng->u.src_ring.hp = idx * srng->entry_size;
3545 }
3546 
3547 /**
3548  * hal_srng_dst_set_tp() - set tail idx.
3549  * @hal_ring_hdl: srng handle
3550  * @idx: tail idx
3551  *
3552  * Return: none
3553  */
3554 static inline
3555 void hal_srng_dst_set_tp(hal_ring_handle_t hal_ring_hdl, uint16_t idx)
3556 {
3557 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3558 
3559 	srng->u.dst_ring.tp = idx * srng->entry_size;
3560 }
3561 
3562 /**
3563  * hal_srng_src_get_tpidx() - get tail idx
3564  * @hal_ring_hdl: srng handle
3565  *
3566  * Return: tail idx
3567  */
3568 static inline
3569 uint16_t hal_srng_src_get_tpidx(hal_ring_handle_t hal_ring_hdl)
3570 {
3571 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3572 	uint32_t tp = *(volatile uint32_t *)(srng->u.src_ring.tp_addr);
3573 
3574 	return tp / srng->entry_size;
3575 }
3576 
3577 /**
3578  * hal_srng_dst_get_hpidx() - get head idx
3579  * @hal_ring_hdl: srng handle
3580  *
3581  * Return: head idx
3582  */
3583 static inline
3584 uint16_t hal_srng_dst_get_hpidx(hal_ring_handle_t hal_ring_hdl)
3585 {
3586 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3587 	uint32_t hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
3588 
3589 	return hp / srng->entry_size;
3590 }
3591 
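/*
 * Example (illustrative numbers only): the idx based helpers above convert
 * between entry indices and 32-bit word offsets. For a ring with
 * entry_size = 2, hal_srng_dst_set_tp(hal_ring_hdl, 10) stores a tail pointer
 * of 20 words, and hal_srng_dst_get_hpidx() divides the word offset read from
 * hp_addr back down to an entry index.
 */
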
3592 /**
3593  * hal_srng_batch_threshold_irq_enabled() - check if srng batch count
3594  *                                          threshold irq enabled
3595  * @hal_ring_hdl: srng handle
3596  *
3597  * Return: true if enabled, false if not.
3598  */
3599 static inline
3600 bool hal_srng_batch_threshold_irq_enabled(hal_ring_handle_t hal_ring_hdl)
3601 {
3602 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
3603 
3604 	if (srng->intr_batch_cntr_thres_entries &&
3605 	    (srng->flags & HAL_SRNG_MSI_INTR))
3606 		return true;
3607 	else
3608 		return false;
3609 }
3610 
3611 #ifdef FEATURE_DIRECT_LINK
3612 /**
3613  * hal_srng_set_msi_irq_config() - Set the MSI irq configuration for srng
3614  * @hal_soc_hdl: hal soc handle
3615  * @hal_ring_hdl: srng handle
3616  * @ring_params: ring parameters
3617  *
3618  * Return: QDF status
3619  */
3620 static inline QDF_STATUS
3621 hal_srng_set_msi_irq_config(hal_soc_handle_t hal_soc_hdl,
3622 			    hal_ring_handle_t hal_ring_hdl,
3623 			    struct hal_srng_params *ring_params)
3624 {
3625 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
3626 
3627 	return hal_soc->ops->hal_srng_set_msi_config(hal_ring_hdl, ring_params);
3628 }
3629 #else
3630 static inline QDF_STATUS
3631 hal_srng_set_msi_irq_config(hal_soc_handle_t hal_soc_hdl,
3632 			    hal_ring_handle_t hal_ring_hdl,
3633 			    struct hal_srng_params *ring_params)
3634 {
3635 	return QDF_STATUS_E_NOSUPPORT;
3636 }
3637 #endif
3638 #endif /* _HAL_API_H_ */
3639