/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "hal_api.h"
#include "hal_reo.h"
#include "target_type.h"
#include "qdf_module.h"
#include "wcss_version.h"
#include <qdf_tracepoint.h>
#include "qdf_ssr_driver_dump.h"

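/*
 * Keep a TCL data command template in the image; __attribute__((used))
 * stops the compiler from discarding the otherwise unreferenced symbol,
 * presumably so debug tooling can locate the structure layout.
 */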
struct tcl_data_cmd gtcl_data_symbol __attribute__((used));

#ifdef QCA_WIFI_QCA6290
void hal_qca6290_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA8074
void hal_qca8074_attach(struct hal_soc *hal);
#endif
#if defined(QCA_WIFI_QCA8074V2) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA9574)
void hal_qca8074v2_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6390
void hal_qca6390_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6490
void hal_qca6490_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCN9000
void hal_qcn9000_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCN9224
void hal_qcn9224v2_attach(struct hal_soc *hal);
#endif
#if defined(QCA_WIFI_QCN6122) || defined(QCA_WIFI_QCN9160)
void hal_qcn6122_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCN6432
void hal_qcn6432_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6750
void hal_qca6750_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA5018
void hal_qca5018_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA5332
void hal_qca5332_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_KIWI
void hal_kiwi_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_WCN6450
void hal_wcn6450_attach(struct hal_soc *hal);
#endif

#ifdef ENABLE_VERBOSE_DEBUG
bool is_hal_verbose_debug_enabled;
#endif

#define HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(x)	((x) + 0x4)
#define HAL_REO_DESTINATION_RING_CTRL_IX_1_ADDR(x)	((x) + 0x8)
#define HAL_REO_DESTINATION_RING_CTRL_IX_2_ADDR(x)	((x) + 0xc)
#define HAL_REO_DESTINATION_RING_CTRL_IX_3_ADDR(x)	((x) + 0x10)

#ifdef ENABLE_HAL_REG_WR_HISTORY
struct hal_reg_write_fail_history hal_reg_wr_hist;

void hal_reg_wr_fail_history_add(struct hal_soc *hal_soc,
				 uint32_t offset,
				 uint32_t wr_val, uint32_t rd_val)
{
	struct hal_reg_write_fail_entry *record;
	int idx;

	idx = hal_history_get_next_index(&hal_soc->reg_wr_fail_hist->index,
					 HAL_REG_WRITE_HIST_SIZE);

	record = &hal_soc->reg_wr_fail_hist->record[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->reg_offset = offset;
	record->write_val = wr_val;
	record->read_val = rd_val;
}

static void hal_reg_write_fail_history_init(struct hal_soc *hal)
{
	hal->reg_wr_fail_hist = &hal_reg_wr_hist;

	qdf_atomic_set(&hal->reg_wr_fail_hist->index, -1);
}
#else
static void hal_reg_write_fail_history_init(struct hal_soc *hal)
{
}
#endif

/**
 * hal_get_srng_ring_id() - get the ring id of a described ring
 * @hal: hal_soc data structure
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 * @mac_id: which mac does the ring belong to (or 0 for non-lmac rings)
 *
 * Return: the ring id or -EINVAL if the ring does not exist.
 */
static int hal_get_srng_ring_id(struct hal_soc *hal, int ring_type,
				int ring_num, int mac_id)
{
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	int ring_id;

	if (ring_num >= ring_config->max_rings) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "%s: ring_num exceeded maximum no. of supported rings",
			  __func__);
		/* TODO: This is a programming error. Assert if this happens */
		return -EINVAL;
	}

	/*
	 * Some DMAC rings share a common source ring, hence don't provide them
	 * with separate ring IDs per LMAC.
	 */
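	/*
	 * Illustrative example (hypothetical values): for an LMAC ring
	 * type with start_ring_id 40 and HAL_MAX_RINGS_PER_LMAC of 4,
	 * ring_num 1 on mac_id 2 maps to ring_id 40 + 1 + 2 * 4 = 49,
	 * giving each LMAC its own contiguous block of ring IDs.
	 */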
	if (ring_config->lmac_ring && !ring_config->dmac_cmn_ring) {
		ring_id = (ring_config->start_ring_id + ring_num +
			   (mac_id * HAL_MAX_RINGS_PER_LMAC));
	} else {
		ring_id = ring_config->start_ring_id + ring_num;
	}

	return ring_id;
}

static struct hal_srng *hal_get_srng(struct hal_soc *hal, int ring_id)
{
	/* TODO: Should we allocate srng structures dynamically? */
	return &(hal->srng_list[ring_id]);
}

#ifndef SHADOW_REG_CONFIG_DISABLED
#define HP_OFFSET_IN_REG_START 1
#define OFFSET_FROM_HP_TO_TP 4
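/*
 * A ring's TP register follows its HP register: adding
 * OFFSET_FROM_HP_TO_TP (4 bytes) to the HP register address yields the
 * TP register address, which is how hal_set_one_shadow_config() below
 * derives the register to shadow for destination rings.
 */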
static void hal_update_srng_hp_tp_address(struct hal_soc *hal_soc,
					  int shadow_config_index,
					  int ring_type,
					  int ring_num)
{
	struct hal_srng *srng;
	int ring_id;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal_soc, ring_type);

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, 0);
	if (ring_id < 0)
		return;

	srng = hal_get_srng(hal_soc, ring_id);

	if (ring_config->ring_dir == HAL_SRNG_DST_RING) {
		srng->u.dst_ring.tp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal_soc->dev_base_addr;
		hal_debug("tp_addr=%pK dev base addr %pK index %u",
			  srng->u.dst_ring.tp_addr, hal_soc->dev_base_addr,
			  shadow_config_index);
	} else {
		srng->u.src_ring.hp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal_soc->dev_base_addr;
		hal_debug("hp_addr=%pK dev base addr %pK index %u",
			  srng->u.src_ring.hp_addr,
			  hal_soc->dev_base_addr, shadow_config_index);
	}
}
#endif

#ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE
void hal_set_one_target_reg_config(struct hal_soc *hal,
				   uint32_t target_reg_offset,
				   int list_index)
{
	int i = list_index;

	qdf_assert_always(i < MAX_GENERIC_SHADOW_REG);
	hal->list_shadow_reg_config[i].target_register =
		target_reg_offset;
	hal->num_generic_shadow_regs_configured++;
}

qdf_export_symbol(hal_set_one_target_reg_config);

#define REO_R0_DESTINATION_RING_CTRL_ADDR_OFFSET 0x4
#define MAX_REO_REMAP_SHADOW_REGS 4
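/*
 * The four REO destination remap registers (IX_0..IX_3) sit 4 bytes
 * apart, matching the HAL_REO_DESTINATION_RING_CTRL_IX_*_ADDR() macros
 * at the top of this file, so the loop below can step through them with
 * a single fixed offset.
 */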
QDF_STATUS hal_set_shadow_regs(void *hal_soc)
{
	uint32_t target_reg_offset;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	int i;
	struct hal_hw_srng_config *srng_config =
		&hal->hw_srng_table[WBM2SW_RELEASE];
	uint32_t reo_reg_base;

	reo_reg_base = hal_get_reo_reg_base_offset(hal_soc);

	target_reg_offset =
		HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(reo_reg_base);

	for (i = 0; i < MAX_REO_REMAP_SHADOW_REGS; i++) {
		hal_set_one_target_reg_config(hal, target_reg_offset, i);
		target_reg_offset += REO_R0_DESTINATION_RING_CTRL_ADDR_OFFSET;
	}

	target_reg_offset = srng_config->reg_start[HP_OFFSET_IN_REG_START];
	target_reg_offset += (srng_config->reg_size[HP_OFFSET_IN_REG_START]
			      * HAL_IPA_TX_COMP_RING_IDX);

	hal_set_one_target_reg_config(hal, target_reg_offset, i);
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_set_shadow_regs);

QDF_STATUS hal_construct_shadow_regs(void *hal_soc)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	int shadow_config_index = hal->num_shadow_registers_configured;
	int i;
	int num_regs = hal->num_generic_shadow_regs_configured;

	for (i = 0; i < num_regs; i++) {
		qdf_assert_always(shadow_config_index < MAX_SHADOW_REGISTERS);
		hal->shadow_config[shadow_config_index].addr =
			hal->list_shadow_reg_config[i].target_register;
		hal->list_shadow_reg_config[i].shadow_config_index =
			shadow_config_index;
		hal->list_shadow_reg_config[i].va =
			SHADOW_REGISTER(shadow_config_index) +
			(uintptr_t)hal->dev_base_addr;
		hal_debug("target_reg %x, shadow register 0x%x shadow_index 0x%x",
			  hal->shadow_config[shadow_config_index].addr,
			  SHADOW_REGISTER(shadow_config_index),
			  shadow_config_index);
		shadow_config_index++;
		hal->num_shadow_registers_configured++;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_construct_shadow_regs);
#endif

#ifndef SHADOW_REG_CONFIG_DISABLED

QDF_STATUS hal_set_one_shadow_config(void *hal_soc,
				     int ring_type,
				     int ring_num)
{
	uint32_t target_register;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *srng_config = &hal->hw_srng_table[ring_type];
	int shadow_config_index = hal->num_shadow_registers_configured;

	if (shadow_config_index >= MAX_SHADOW_REGISTERS) {
		QDF_ASSERT(0);
		return QDF_STATUS_E_RESOURCES;
	}

	hal->num_shadow_registers_configured++;

	target_register = srng_config->reg_start[HP_OFFSET_IN_REG_START];
	target_register += (srng_config->reg_size[HP_OFFSET_IN_REG_START]
			    * ring_num);

	/* if the ring is a dst ring, we need to shadow the tail pointer */
	if (srng_config->ring_dir == HAL_SRNG_DST_RING)
		target_register += OFFSET_FROM_HP_TO_TP;

	hal->shadow_config[shadow_config_index].addr = target_register;

	/* update hp/tp addr in the hal_soc structure */
	hal_update_srng_hp_tp_address(hal_soc, shadow_config_index, ring_type,
				      ring_num);

	hal_debug("target_reg %x, shadow register 0x%x shadow_index 0x%x, ring_type %d, ring num %d",
		  target_register,
		  SHADOW_REGISTER(shadow_config_index),
		  shadow_config_index,
		  ring_type, ring_num);

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_set_one_shadow_config);

QDF_STATUS hal_construct_srng_shadow_regs(void *hal_soc)
{
	int ring_type, ring_num;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	for (ring_type = 0; ring_type < MAX_RING_TYPES; ring_type++) {
		struct hal_hw_srng_config *srng_config =
			&hal->hw_srng_table[ring_type];

		if (ring_type == CE_SRC ||
		    ring_type == CE_DST ||
		    ring_type == CE_DST_STATUS)
			continue;

		if (srng_config->lmac_ring)
			continue;

		for (ring_num = 0; ring_num < srng_config->max_rings;
		     ring_num++)
			hal_set_one_shadow_config(hal_soc, ring_type, ring_num);
	}

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_construct_srng_shadow_regs);
#else

QDF_STATUS hal_construct_srng_shadow_regs(void *hal_soc)
{
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_construct_srng_shadow_regs);

QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
				     int ring_num)
{
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hal_set_one_shadow_config);
#endif

void hal_get_shadow_config(void *hal_soc,
			   struct pld_shadow_reg_v2_cfg **shadow_config,
			   int *num_shadow_registers_configured)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	*shadow_config = &hal->shadow_config[0].v2;
	*num_shadow_registers_configured =
		hal->num_shadow_registers_configured;
}
qdf_export_symbol(hal_get_shadow_config);

#ifdef CONFIG_SHADOW_V3
void hal_get_shadow_v3_config(void *hal_soc,
			      struct pld_shadow_reg_v3_cfg **shadow_config,
			      int *num_shadow_registers_configured)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	*shadow_config = &hal->shadow_config[0].v3;
	*num_shadow_registers_configured =
		hal->num_shadow_registers_configured;
}
qdf_export_symbol(hal_get_shadow_v3_config);
#endif

static bool hal_validate_shadow_register(struct hal_soc *hal,
					 uint32_t *destination,
					 uint32_t *shadow_address)
{
	unsigned int index;
	uint32_t *shadow_0_offset = SHADOW_REGISTER(0) + hal->dev_base_addr;
	int destination_ba_offset =
		((char *)destination) - (char *)hal->dev_base_addr;

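	/*
	 * Both pointers are uint32_t *, so the subtraction below yields a
	 * distance in 32-bit words; assuming the shadow registers are
	 * consecutive 32-bit registers, that distance is exactly the
	 * shadow register index.
	 */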
	index = shadow_address - shadow_0_offset;

	if (index >= MAX_SHADOW_REGISTERS) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: index %x out of bounds", __func__, index);
		goto error;
	} else if (hal->shadow_config[index].addr != destination_ba_offset) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: sanity check failure, expected %x, found %x",
			  __func__, destination_ba_offset,
			  hal->shadow_config[index].addr);
		goto error;
	}
	return true;
error:
	qdf_print("baddr %pK, destination %pK, shadow_address %pK s0offset %pK index %x",
		  hal->dev_base_addr, destination, shadow_address,
		  shadow_0_offset, index);
	QDF_BUG(0);
	return false;
}

static void hal_target_based_configure(struct hal_soc *hal)
{
	/*
	 * Indicate initialization of srngs to avoid force wake
	 * as umac power collapse is not enabled yet
	 */
	hal->init_phase = true;

	switch (hal->target_type) {
#ifdef QCA_WIFI_QCA6290
	case TARGET_TYPE_QCA6290:
		hal->use_register_windowing = true;
		hal_qca6290_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA6390
	case TARGET_TYPE_QCA6390:
		hal->use_register_windowing = true;
		hal_qca6390_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA6490
	case TARGET_TYPE_QCA6490:
		hal->use_register_windowing = true;
		hal_qca6490_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA6750
	case TARGET_TYPE_QCA6750:
		hal->use_register_windowing = true;
		hal->static_window_map = true;
		hal_qca6750_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_KIWI
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_MANGO:
	case TARGET_TYPE_PEACH:
		hal->use_register_windowing = true;
		hal_kiwi_attach(hal);
		break;
#endif
#if defined(QCA_WIFI_QCA8074) && defined(WIFI_TARGET_TYPE_3_0)
	case TARGET_TYPE_QCA8074:
		hal_qca8074_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCA8074V2)
	case TARGET_TYPE_QCA8074V2:
		hal_qca8074v2_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCA6018)
	case TARGET_TYPE_QCA6018:
		hal_qca8074v2_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCA9574)
	case TARGET_TYPE_QCA9574:
		hal_qca8074v2_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCN6122)
	case TARGET_TYPE_QCN6122:
		hal->use_register_windowing = true;
		/*
		 * Static window map is enabled for qcn6122 to use 2mb bar
		 * size and use multiple windows to write into registers.
		 */
		hal->static_window_map = true;
		hal_qcn6122_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCN9160)
	case TARGET_TYPE_QCN9160:
		hal->use_register_windowing = true;
		/*
		 * Static window map is enabled for qcn9160 to use 2mb bar
		 * size and use multiple windows to write into registers.
		 */
		hal->static_window_map = true;
		hal_qcn6122_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCN6432)
	case TARGET_TYPE_QCN6432:
		hal->use_register_windowing = true;
		/*
		 * Static window map is enabled for qcn6432 to use 2mb bar
		 * size and use multiple windows to write into registers.
		 */
		hal->static_window_map = true;
		hal_qcn6432_attach(hal);
		break;
#endif

#ifdef QCA_WIFI_QCN9000
	case TARGET_TYPE_QCN9000:
		hal->use_register_windowing = true;
		/*
		 * Static window map is enabled for qcn9000 to use 2mb bar
		 * size and use multiple windows to write into registers.
		 */
		hal->static_window_map = true;
		hal_qcn9000_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA5018
	case TARGET_TYPE_QCA5018:
		hal->use_register_windowing = true;
		hal->static_window_map = true;
		hal_qca5018_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCN9224
	case TARGET_TYPE_QCN9224:
		hal->use_register_windowing = true;
		hal->static_window_map = true;
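		/* QCN9224 v1 is not supported; only v2 has a HAL attach */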
		if (hal->version == 1)
			qdf_assert_always(0);
		else
			hal_qcn9224v2_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA5332
	case TARGET_TYPE_QCA5332:
		hal->use_register_windowing = true;
		hal->static_window_map = true;
		hal_qca5332_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_WCN6450
	case TARGET_TYPE_WCN6450:
		hal->use_register_windowing = true;
		hal->static_window_map = true;
		hal_wcn6450_attach(hal);
		break;
#endif
	default:
		break;
	}
}

uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hif_target_info *tgt_info =
		hif_get_target_info_handle(hal_soc->hif_handle);

	return tgt_info->target_type;
}

qdf_export_symbol(hal_get_target_type);

#if defined(FEATURE_HAL_DELAYED_REG_WRITE)
/**
 * hal_is_reg_write_tput_level_high() - throughput level for delayed reg writes
 * @hal: hal_soc pointer
 *
 * Return: true if throughput is high, else false.
 */
static inline bool hal_is_reg_write_tput_level_high(struct hal_soc *hal)
{
	int bw_level = hif_get_bandwidth_level(hal->hif_handle);

	return (bw_level >= PLD_BUS_WIDTH_MEDIUM) ? true : false;
}

static inline
char *hal_fill_reg_write_srng_stats(struct hal_srng *srng,
				    char *buf, qdf_size_t size)
{
	qdf_scnprintf(buf, size, "enq %u deq %u coal %u direct %u",
		      srng->wstats.enqueues, srng->wstats.dequeues,
		      srng->wstats.coalesces, srng->wstats.direct);
	return buf;
}

/* bytes for local buffer */
#define HAL_REG_WRITE_SRNG_STATS_LEN 100

#ifndef WLAN_SOFTUMAC_SUPPORT
void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl)
{
	struct hal_srng *srng;
	char buf[HAL_REG_WRITE_SRNG_STATS_LEN];
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

	srng = hal_get_srng(hal, HAL_SRNG_SW2TCL1);
	hal_debug("SW2TCL1: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

	srng = hal_get_srng(hal, HAL_SRNG_WBM2SW0_RELEASE);
	hal_debug("WBM2SW0: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

	srng = hal_get_srng(hal, HAL_SRNG_REO2SW1);
	hal_debug("REO2SW1: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

	srng = hal_get_srng(hal, HAL_SRNG_REO2SW2);
	hal_debug("REO2SW2: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));

	srng = hal_get_srng(hal, HAL_SRNG_REO2SW3);
	hal_debug("REO2SW3: %s",
		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));
}

void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl)
{
	uint32_t *hist;
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

	hist = hal->stats.wstats.sched_delay;
	hal_debug("wstats: enq %u deq %u coal %u direct %u q_depth %u max_q %u sched-delay hist %u %u %u %u",
		  qdf_atomic_read(&hal->stats.wstats.enqueues),
		  hal->stats.wstats.dequeues,
		  qdf_atomic_read(&hal->stats.wstats.coalesces),
		  qdf_atomic_read(&hal->stats.wstats.direct),
		  qdf_atomic_read(&hal->stats.wstats.q_depth),
		  hal->stats.wstats.max_q_depth,
		  hist[REG_WRITE_SCHED_DELAY_SUB_100us],
		  hist[REG_WRITE_SCHED_DELAY_SUB_1000us],
		  hist[REG_WRITE_SCHED_DELAY_SUB_5000us],
		  hist[REG_WRITE_SCHED_DELAY_GT_5000us]);
}
#else
void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl)
{
}

/* TODO: Need separate logic for Evros */
void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl)
{
}
#endif

int hal_get_reg_write_pending_work(void *hal_soc)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	return qdf_atomic_read(&hal->active_work_cnt);
}

#endif

#ifdef FEATURE_HAL_DELAYED_REG_WRITE
#ifdef MEMORY_DEBUG
/*
 * Length of the queue (array) used to hold delayed register writes.
 * Must be a power of 2, since the read and write indices are wrapped
 * with a bitmask of (HAL_REG_WRITE_QUEUE_LEN - 1).
 */
#define HAL_REG_WRITE_QUEUE_LEN 128
#else
#define HAL_REG_WRITE_QUEUE_LEN 32
#endif
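/*
 * For example, with HAL_REG_WRITE_QUEUE_LEN 32 the wrap mask is 0x1f,
 * so write index 32 maps back to slot 0; a non-power-of-2 length would
 * make the mask skip slots.
 */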

#ifdef QCA_WIFI_QCA6750

#define HAL_DEL_WRITE_FORCE_UPDATE_THRES 5

static inline void hal_srng_update_last_hptp(struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		srng->updated_hp = srng->u.src_ring.hp;
	else
		srng->updated_tp = srng->u.dst_ring.tp;

	srng->force_cnt = 0;
}

/* If HP/TP register updates are delayed because the delayed reg write
 * work does not get scheduled, the hardware sees a HP/TP delta and
 * keeps firing interrupts until the HP/TP updates reach it.
 *
 * When the system is heavily stressed, this delay in HP/TP updates
 * results in an IRQ storm, stressing the system further. Force-update
 * HP/TP to the hardware under such scenarios to avoid this.
 */
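/*
 * Note that the force-update path below only kicks in after
 * HAL_DEL_WRITE_FORCE_UPDATE_THRES (5) consecutive calls have seen a
 * pending, unwritten HP/TP value; until then the update is left to the
 * delayed-write worker.
 */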
void hal_srng_check_and_update_hptp(struct hal_soc *hal_soc,
				    struct hal_srng *srng, bool update)
{
	uint32_t value;

	if (!update)
		return;

	SRNG_LOCK(&srng->lock);
	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		value = srng->u.src_ring.hp;

		if (value == srng->updated_hp ||
		    srng->force_cnt++ < HAL_DEL_WRITE_FORCE_UPDATE_THRES)
			goto out_unlock;

		hal_write_address_32_mb(hal_soc, srng->u.src_ring.hp_addr,
					value, false);
	} else {
		value = srng->u.dst_ring.tp;

		if (value == srng->updated_tp ||
		    srng->force_cnt++ < HAL_DEL_WRITE_FORCE_UPDATE_THRES)
			goto out_unlock;

		hal_write_address_32_mb(hal_soc, srng->u.dst_ring.tp_addr,
					value, false);
	}

	hal_srng_update_last_hptp(srng);
	hal_srng_reg_his_add(srng, value);
	qdf_atomic_inc(&hal_soc->stats.wstats.direct);
	srng->wstats.direct++;

out_unlock:
	SRNG_UNLOCK(&srng->lock);
}
#else
static inline void hal_srng_update_last_hptp(struct hal_srng *srng)
{
}
#endif /* QCA_WIFI_QCA6750 */

/**
 * hal_process_reg_write_q_elem() - process a register write queue element
 * @hal: hal_soc pointer
 * @q_elem: pointer to hal register write queue element
 *
 * Return: The value which was written to the address
 */
static uint32_t
hal_process_reg_write_q_elem(struct hal_soc *hal,
			     struct hal_reg_write_q_elem *q_elem)
{
	struct hal_srng *srng = q_elem->srng;
	uint32_t write_val;

	SRNG_LOCK(&srng->lock);

	srng->reg_write_in_progress = false;
	srng->wstats.dequeues++;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		q_elem->dequeue_val = srng->u.src_ring.hp;
		hal_write_address_32_mb(hal,
					srng->u.src_ring.hp_addr,
					srng->u.src_ring.hp, false);
		write_val = srng->u.src_ring.hp;
	} else {
		q_elem->dequeue_val = srng->u.dst_ring.tp;
		hal_write_address_32_mb(hal,
					srng->u.dst_ring.tp_addr,
					srng->u.dst_ring.tp, false);
		write_val = srng->u.dst_ring.tp;
	}

	hal_srng_update_last_hptp(srng);
	hal_srng_reg_his_add(srng, write_val);

	q_elem->valid = 0;
	srng->last_dequeue_time = q_elem->dequeue_time;
	SRNG_UNLOCK(&srng->lock);

	return write_val;
}

/**
 * hal_reg_write_fill_sched_delay_hist() - fill reg write delay histogram in hal
 * @hal: hal_soc pointer
 * @delay_us: delay in us
 *
 * Return: None
 */
static inline void hal_reg_write_fill_sched_delay_hist(struct hal_soc *hal,
						       uint64_t delay_us)
{
	uint32_t *hist;

	hist = hal->stats.wstats.sched_delay;

	if (delay_us < 100)
		hist[REG_WRITE_SCHED_DELAY_SUB_100us]++;
	else if (delay_us < 1000)
		hist[REG_WRITE_SCHED_DELAY_SUB_1000us]++;
	else if (delay_us < 5000)
		hist[REG_WRITE_SCHED_DELAY_SUB_5000us]++;
	else
		hist[REG_WRITE_SCHED_DELAY_GT_5000us]++;
}

#ifdef SHADOW_WRITE_DELAY

#define SHADOW_WRITE_MIN_DELTA_US 5
#define SHADOW_WRITE_DELAY_US 50
/*
 * Never add srngs which are performance critical here; the delay
 * itself will hurt performance heavily.
 */
#define IS_SRNG_MATCH(s)	((s)->ring_id == HAL_SRNG_CE_1_DST_STATUS || \
				 (s)->ring_id == HAL_SRNG_CE_1_DST)
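/*
 * With the match above, only the CE1 destination and destination-status
 * rings are ever delayed, and each delayed dequeue costs at most
 * SHADOW_WRITE_DELAY_US (50 us) on top of the shadow-register check.
 */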

static inline bool hal_reg_write_need_delay(struct hal_reg_write_q_elem *elem)
{
	struct hal_srng *srng = elem->srng;
	struct hal_soc *hal;
	qdf_time_t now;
	qdf_iomem_t real_addr;

	if (qdf_unlikely(!srng))
		return false;

	hal = srng->hal_soc;
	if (qdf_unlikely(!hal))
		return false;

	/* Check if it is target srng, and valid shadow reg */
	if (qdf_likely(!IS_SRNG_MATCH(srng)))
		return false;

	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		real_addr = SRNG_SRC_ADDR(srng, HP);
	else
		real_addr = SRNG_DST_ADDR(srng, TP);
	if (!hal_validate_shadow_register(hal, real_addr, elem->addr))
		return false;

	/* Check the time delta from last write of same srng */
	now = qdf_get_log_timestamp();
	if (qdf_log_timestamp_to_usecs(now - srng->last_dequeue_time) >
	    SHADOW_WRITE_MIN_DELTA_US)
		return false;

	/* Delay dequeue, and record */
	qdf_udelay(SHADOW_WRITE_DELAY_US);

	srng->wstats.dequeue_delay++;
	hal->stats.wstats.dequeue_delay++;

	return true;
}
#else
static inline bool hal_reg_write_need_delay(struct hal_reg_write_q_elem *elem)
{
	return false;
}
#endif

#define MAX_DELAYED_REG_WRITE_RETRY 5

/**
 * hal_reg_write_work() - Worker to process delayed writes
 * @arg: hal_soc pointer
 *
 * Return: None
 */
static void hal_reg_write_work(void *arg)
{
	int32_t q_depth, write_val;
	struct hal_soc *hal = arg;
	struct hal_reg_write_q_elem *q_elem;
	uint64_t delta_us;
	uint8_t ring_id;
	uint32_t *addr;
	uint32_t num_processed = 0;
	uint8_t retry_count = 0;

	q_elem = &hal->reg_write_queue[(hal->read_idx)];
	q_elem->work_scheduled_time = qdf_get_log_timestamp();
	q_elem->cpu_id = qdf_get_cpu();

	/* Make sure q_elem is read consistently across CPUs */
	qdf_rmb();
	if (!q_elem->valid)
		return;

	q_depth = qdf_atomic_read(&hal->stats.wstats.q_depth);
	if (q_depth > hal->stats.wstats.max_q_depth)
		hal->stats.wstats.max_q_depth = q_depth;

	if (hif_prevent_link_low_power_states(hal->hif_handle)) {
		hal->stats.wstats.prevent_l1_fails++;
		return;
	}

	while (true) {
		qdf_rmb();
		if (!q_elem->valid)
			break;

		qdf_rmb();
		/*
		 * Buy some more time to make sure all fields of q_elem
		 * are updated across CPUs, in case the wmb/rmb pair has
		 * not yet taken effect.
		 */
		if (qdf_unlikely(!q_elem->srng ||
				 (qdf_atomic_read(&q_elem->ring_id) !=
				 q_elem->srng->ring_id))) {
			hal_err_rl("q_elem fields not up to date 0x%x 0x%x",
				   q_elem->srng ? q_elem->srng->ring_id : 0xDEAD,
				   qdf_atomic_read(&q_elem->ring_id));
			if (retry_count++ < MAX_DELAYED_REG_WRITE_RETRY) {
				/* Sleep for 1ms before retry */
				qdf_sleep(1);
				continue;
			}
			qdf_assert_always(0);
		}

		q_elem->dequeue_time = qdf_get_log_timestamp();
		ring_id = q_elem->srng->ring_id;
		addr = q_elem->addr;
		delta_us = qdf_log_timestamp_to_usecs(q_elem->dequeue_time -
						      q_elem->enqueue_time);
		hal_reg_write_fill_sched_delay_hist(hal, delta_us);

		hal->stats.wstats.dequeues++;
		qdf_atomic_dec(&hal->stats.wstats.q_depth);

		if (hal_reg_write_need_delay(q_elem))
			hal_verbose_debug("Delay reg writer for srng 0x%x, addr 0x%pK",
					  q_elem->srng->ring_id, q_elem->addr);

		write_val = hal_process_reg_write_q_elem(hal, q_elem);
		hal_verbose_debug("read_idx %u srng 0x%x, addr 0x%pK dequeue_val %u sched delay %llu us",
				  hal->read_idx, ring_id, addr, write_val,
				  delta_us);

		qdf_trace_dp_del_reg_write(ring_id, q_elem->enqueue_val,
					   q_elem->dequeue_val,
					   q_elem->enqueue_time,
					   q_elem->dequeue_time);

		num_processed++;
		hal->read_idx = (hal->read_idx + 1) &
					(HAL_REG_WRITE_QUEUE_LEN - 1);
		q_elem = &hal->reg_write_queue[(hal->read_idx)];
		retry_count = 0;
	}

	hif_allow_link_low_power_states(hal->hif_handle);
	/*
	 * Decrement active_work_cnt by the number of elements dequeued after
	 * hif_allow_link_low_power_states.
	 * This makes sure that hif_try_complete_tasks will wait till we make
	 * the bus access in hif_allow_link_low_power_states. This will avoid
	 * race condition between delayed register worker and bus suspend
	 * (system suspend or runtime suspend).
	 *
	 * The following decrement should be done at the end!
	 */
	qdf_atomic_sub(num_processed, &hal->active_work_cnt);
}

static void __hal_flush_reg_write_work(struct hal_soc *hal)
{
	qdf_flush_work(&hal->reg_write_work);
	qdf_disable_work(&hal->reg_write_work);
}

void hal_flush_reg_write_work(hal_soc_handle_t hal_handle)
{
	__hal_flush_reg_write_work((struct hal_soc *)hal_handle);
}

/**
 * hal_reg_write_enqueue() - enqueue register writes into kworker
 * @hal_soc: hal_soc pointer
 * @srng: srng pointer
 * @addr: iomem address of register
 * @value: value to be written to iomem address
 *
 * This function executes from within the SRNG LOCK
 *
 * Return: None
 */
static void hal_reg_write_enqueue(struct hal_soc *hal_soc,
				  struct hal_srng *srng,
				  void __iomem *addr,
				  uint32_t value)
{
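	/*
	 * Lock-free hand-off to the worker, in sketch: the producer here
	 * fills every q_elem field, issues a write barrier, then sets
	 * ->valid; the consumer in hal_reg_write_work() tests ->valid,
	 * issues a read barrier, then reads the fields. The paired
	 * barriers ensure the worker never sees valid == true alongside
	 * stale fields.
	 */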
	struct hal_reg_write_q_elem *q_elem;
	uint32_t write_idx;

	if (srng->reg_write_in_progress) {
		hal_verbose_debug("Already in progress srng ring id 0x%x addr 0x%pK val %u",
				  srng->ring_id, addr, value);
		qdf_atomic_inc(&hal_soc->stats.wstats.coalesces);
		srng->wstats.coalesces++;
		return;
	}

	write_idx = qdf_atomic_inc_return(&hal_soc->write_idx);

	write_idx = write_idx & (HAL_REG_WRITE_QUEUE_LEN - 1);

	q_elem = &hal_soc->reg_write_queue[write_idx];

	if (q_elem->valid) {
		hal_err("queue full");
		QDF_BUG(0);
		return;
	}

	qdf_atomic_inc(&hal_soc->stats.wstats.enqueues);
	srng->wstats.enqueues++;

	qdf_atomic_inc(&hal_soc->stats.wstats.q_depth);

	q_elem->srng = srng;
	q_elem->addr = addr;
	qdf_atomic_set(&q_elem->ring_id, srng->ring_id);
	q_elem->enqueue_val = value;
	q_elem->enqueue_time = qdf_get_log_timestamp();

	/*
	 * Before the valid flag is set to true, all the other
	 * fields in the q_elem need to be updated in memory.
	 * Else there is a chance that the dequeuing worker thread
	 * might read stale entries and process an incorrect srng.
	 */
	qdf_wmb();
	q_elem->valid = true;

	/*
	 * After all other fields in the q_elem have been updated
	 * in memory successfully, the valid flag needs to be updated
	 * in memory in time too.
	 * Else there is a chance that the dequeuing worker thread
	 * might read a stale valid flag and the work will be bypassed
	 * for this round. And if there is no other work scheduled
	 * later, this hal register write won't be updated any more.
	 */
	qdf_wmb();

	srng->reg_write_in_progress = true;
	qdf_atomic_inc(&hal_soc->active_work_cnt);

	hal_verbose_debug("write_idx %u srng ring id 0x%x addr 0x%pK val %u",
			  write_idx, srng->ring_id, addr, value);

	qdf_queue_work(hal_soc->qdf_dev, hal_soc->reg_write_wq,
		       &hal_soc->reg_write_work);
}

/**
 * hal_delayed_reg_write_init() - Initialization function for delayed reg writes
 * @hal: hal_soc pointer
 *
 * Initialize main data structures to process register writes in a delayed
 * workqueue.
 *
 * Return: QDF_STATUS_SUCCESS on success else a QDF error.
 */
static QDF_STATUS hal_delayed_reg_write_init(struct hal_soc *hal)
{
	hal->reg_write_wq =
		qdf_alloc_high_prior_ordered_workqueue("hal_register_write_wq");
	qdf_create_work(0, &hal->reg_write_work, hal_reg_write_work, hal);
	hal->reg_write_queue = qdf_mem_malloc(HAL_REG_WRITE_QUEUE_LEN *
					      sizeof(*hal->reg_write_queue));
	if (!hal->reg_write_queue) {
		hal_err("unable to allocate memory");
		QDF_BUG(0);
		return QDF_STATUS_E_NOMEM;
	}

	/* Initial value of indices */
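	/*
	 * write_idx starts at -1 so the first qdf_atomic_inc_return() in
	 * hal_reg_write_enqueue() yields 0, placing the first element in
	 * slot 0 to match read_idx.
	 */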
	hal->read_idx = 0;
	qdf_atomic_set(&hal->write_idx, -1);
	return QDF_STATUS_SUCCESS;
}

/**
 * hal_delayed_reg_write_deinit() - De-initialize delayed reg write processing
 * @hal: hal_soc pointer
 *
 * De-initialize main data structures to process register writes in a delayed
 * workqueue.
 *
 * Return: None
 */
static void hal_delayed_reg_write_deinit(struct hal_soc *hal)
{
	__hal_flush_reg_write_work(hal);

	qdf_flush_workqueue(0, hal->reg_write_wq);
	qdf_destroy_workqueue(0, hal->reg_write_wq);
	qdf_mem_free(hal->reg_write_queue);
}

#else
static inline QDF_STATUS hal_delayed_reg_write_init(struct hal_soc *hal)
{
	return QDF_STATUS_SUCCESS;
}

static inline void hal_delayed_reg_write_deinit(struct hal_soc *hal)
{
}
#endif

#ifdef FEATURE_HAL_DELAYED_REG_WRITE
#ifdef HAL_RECORD_SUSPEND_WRITE
static struct hal_suspend_write_history
		g_hal_suspend_write_history;

static
void hal_event_suspend_record(uint8_t ring_id, uint32_t value, uint32_t count)
{
	uint32_t index = qdf_atomic_read(&g_hal_suspend_write_history.index) &
					(HAL_SUSPEND_WRITE_HISTORY_MAX - 1);
	struct hal_suspend_write_record *cur_event =
			&g_hal_suspend_write_history.record[index];

	cur_event->ts = qdf_get_log_timestamp();
	cur_event->ring_id = ring_id;
	cur_event->value = value;
	cur_event->direct_wcount = count;
	qdf_atomic_inc(&g_hal_suspend_write_history.index);
}

static inline
void hal_record_suspend_write(uint8_t ring_id, uint32_t value, uint32_t count)
{
	if (hif_rtpm_get_state() >= HIF_RTPM_STATE_SUSPENDING)
		hal_event_suspend_record(ring_id, value, count);
}
#else
static inline
void hal_record_suspend_write(uint8_t ring_id, uint32_t value, uint32_t count)
{
}
#endif

#ifdef QCA_WIFI_QCA6750
void hal_delayed_reg_write(struct hal_soc *hal_soc,
			   struct hal_srng *srng,
			   void __iomem *addr,
			   uint32_t value)
{
	uint8_t vote_access;

	switch (srng->ring_type) {
	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		vote_access = hif_get_ep_vote_access(hal_soc->hif_handle,
						     HIF_EP_VOTE_NONDP_ACCESS);
		if ((vote_access == HIF_EP_VOTE_ACCESS_DISABLE) ||
		    (vote_access == HIF_EP_VOTE_INTERMEDIATE_ACCESS &&
		     PLD_MHI_STATE_L0 ==
		     pld_get_mhi_state(hal_soc->qdf_dev->dev))) {
			hal_write_address_32_mb(hal_soc, addr, value, false);
			hal_srng_update_last_hptp(srng);
			hal_srng_reg_his_add(srng, value);
			qdf_atomic_inc(&hal_soc->stats.wstats.direct);
			srng->wstats.direct++;
		} else {
			hal_reg_write_enqueue(hal_soc, srng, addr, value);
		}
		break;
	default:
		if (hif_get_ep_vote_access(hal_soc->hif_handle,
		    HIF_EP_VOTE_DP_ACCESS) ==
		    HIF_EP_VOTE_ACCESS_DISABLE ||
		    hal_is_reg_write_tput_level_high(hal_soc) ||
		    PLD_MHI_STATE_L0 ==
		    pld_get_mhi_state(hal_soc->qdf_dev->dev)) {
			hal_write_address_32_mb(hal_soc, addr, value, false);
			hal_srng_reg_his_add(srng, value);
			qdf_atomic_inc(&hal_soc->stats.wstats.direct);
			srng->wstats.direct++;
		} else {
			hal_reg_write_enqueue(hal_soc, srng, addr, value);
		}

		break;
	}
}
#else
void hal_delayed_reg_write(struct hal_soc *hal_soc,
			   struct hal_srng *srng,
			   void __iomem *addr,
			   uint32_t value)
{
	if (hal_is_reg_write_tput_level_high(hal_soc) ||
	    pld_is_device_awake(hal_soc->qdf_dev->dev)) {
		qdf_atomic_inc(&hal_soc->stats.wstats.direct);
		srng->wstats.direct++;
		hal_write_address_32_mb(hal_soc, addr, value, false);
		hal_srng_update_last_hptp(srng);
		hal_srng_reg_his_add(srng, value);
	} else {
		hal_reg_write_enqueue(hal_soc, srng, addr, value);
	}

	hal_record_suspend_write(srng->ring_id, value, srng->wstats.direct);
}
#endif
#endif

#ifdef HAL_SRNG_REG_HIS_DEBUG
inline void hal_free_srng_history(struct hal_soc *hal)
{
	int i;

	for (i = 0; i < HAL_SRNG_ID_MAX; i++)
		qdf_mem_free(hal->srng_list[i].reg_his_ctx);
}

inline bool hal_alloc_srng_history(struct hal_soc *hal)
{
	int i;

	for (i = 0; i < HAL_SRNG_ID_MAX; i++) {
		hal->srng_list[i].reg_his_ctx =
			qdf_mem_malloc(sizeof(struct hal_srng_reg_his_ctx));
		if (!hal->srng_list[i].reg_his_ctx) {
			hal_err("srng_hist alloc failed");
			hal_free_srng_history(hal);
			return false;
		}
	}

	return true;
}
#else
inline void hal_free_srng_history(struct hal_soc *hal)
{
}

inline bool hal_alloc_srng_history(struct hal_soc *hal)
{
	return true;
}
#endif

void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev)
{
	struct hal_soc *hal;
	int i;

	hal = qdf_mem_common_alloc(sizeof(*hal));

	if (!hal) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: hal_soc allocation failed", __func__);
		goto fail0;
	}
	hal->hif_handle = hif_handle;
	hal->dev_base_addr = hif_get_dev_ba(hif_handle); /* UMAC */
	hal->dev_base_addr_ce = hif_get_dev_ba_ce(hif_handle); /* CE */
	hal->dev_base_addr_cmem = hif_get_dev_ba_cmem(hif_handle); /* CMEM */
	hal->dev_base_addr_pmm = hif_get_dev_ba_pmm(hif_handle); /* PMM */
	hal->qdf_dev = qdf_dev;
	hal->shadow_rdptr_mem_vaddr = (uint32_t *)qdf_mem_alloc_consistent(
		qdf_dev, qdf_dev->dev, sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
		HAL_SRNG_ID_MAX, &(hal->shadow_rdptr_mem_paddr));
	if (!hal->shadow_rdptr_mem_vaddr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: hal->shadow_rdptr_mem_vaddr allocation failed",
			  __func__);
		goto fail1;
	}
	qdf_mem_zero(hal->shadow_rdptr_mem_vaddr,
		     sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX);

	hal->shadow_wrptr_mem_vaddr =
		(uint32_t *)qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
		&(hal->shadow_wrptr_mem_paddr));
	if (!hal->shadow_wrptr_mem_vaddr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: hal->shadow_wrptr_mem_vaddr allocation failed",
			  __func__);
		goto fail2;
	}
	qdf_mem_zero(hal->shadow_wrptr_mem_vaddr,
		     sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS);

	if (!hal_alloc_srng_history(hal))
		goto fail3;

	for (i = 0; i < HAL_SRNG_ID_MAX; i++) {
		hal->srng_list[i].initialized = 0;
		hal->srng_list[i].ring_id = i;
	}

	qdf_spinlock_create(&hal->register_access_lock);
	hal->register_window = 0;
	hal->target_type = hal_get_target_type(hal_soc_to_hal_soc_handle(hal));
	hal->version = hif_get_soc_version(hif_handle);
	hal->ops = qdf_mem_malloc(sizeof(*hal->ops));

	if (!hal->ops) {
		hal_err("unable to allocate memory for HAL ops");
		goto fail3;
	}

	hal_target_based_configure(hal);

	hal_reg_write_fail_history_init(hal);

	qdf_minidump_log(hal, sizeof(*hal), "hal_soc");

	qdf_ssr_driver_dump_register_region("hal_soc", hal, sizeof(*hal));

	qdf_atomic_init(&hal->active_work_cnt);
	if (hal_delayed_reg_write_init(hal) != QDF_STATUS_SUCCESS) {
		hal_err("unable to initialize delayed reg write");
		goto fail4;
	}

	hif_rtpm_register(HIF_RTPM_ID_HAL_REO_CMD, NULL);

	return (void *)hal;
fail4:
	qdf_ssr_driver_dump_unregister_region("hal_soc");
	qdf_minidump_remove(hal, sizeof(*hal), "hal_soc");
	qdf_mem_free(hal->ops);
fail3:
	qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
				sizeof(*hal->shadow_wrptr_mem_vaddr) *
				HAL_MAX_LMAC_RINGS,
				hal->shadow_wrptr_mem_vaddr,
				hal->shadow_wrptr_mem_paddr, 0);
fail2:
	qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
				sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
				HAL_SRNG_ID_MAX,
				hal->shadow_rdptr_mem_vaddr,
				hal->shadow_rdptr_mem_paddr, 0);
fail1:
	qdf_mem_common_free(hal);
fail0:
	return NULL;
}
qdf_export_symbol(hal_attach);

void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

	mem->dev_base_addr = (void *)hal->dev_base_addr;
	mem->shadow_rdptr_mem_vaddr = (void *)hal->shadow_rdptr_mem_vaddr;
	mem->shadow_wrptr_mem_vaddr = (void *)hal->shadow_wrptr_mem_vaddr;
	mem->shadow_rdptr_mem_paddr = (void *)hal->shadow_rdptr_mem_paddr;
	mem->shadow_wrptr_mem_paddr = (void *)hal->shadow_wrptr_mem_paddr;
	hif_read_phy_mem_base((void *)hal->hif_handle,
			      (qdf_dma_addr_t *)&mem->dev_base_paddr);
	mem->lmac_srng_start_id = HAL_SRNG_LMAC1_ID_START;
	return;
}
qdf_export_symbol(hal_get_meminfo);

void hal_detach(void *hal_soc)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	hif_rtpm_deregister(HIF_RTPM_ID_HAL_REO_CMD);
	hal_delayed_reg_write_deinit(hal);
	hal_reo_shared_qaddr_detach((hal_soc_handle_t)hal);
	qdf_ssr_driver_dump_unregister_region("hal_soc");
	qdf_minidump_remove(hal, sizeof(*hal), "hal_soc");
	qdf_mem_free(hal->ops);

	hal_free_srng_history(hal);
	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
				sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
				HAL_SRNG_ID_MAX,
				hal->shadow_rdptr_mem_vaddr,
				hal->shadow_rdptr_mem_paddr, 0);
	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
				sizeof(*(hal->shadow_wrptr_mem_vaddr)) *
				HAL_MAX_LMAC_RINGS,
				hal->shadow_wrptr_mem_vaddr,
				hal->shadow_wrptr_mem_paddr, 0);
	qdf_mem_common_free(hal);

	return;
}
qdf_export_symbol(hal_detach);

#define HAL_CE_CHANNEL_DST_DEST_CTRL_ADDR(x)	((x) + 0x000000b0)
#define HAL_CE_CHANNEL_DST_DEST_CTRL_DEST_MAX_LENGTH_BMSK	0x0000ffff
#define HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_ADDR(x)	((x) + 0x00000040)
#define HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_RMSK	0x00000007

/**
 * hal_ce_dst_setup() - Initialize CE destination ring registers
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 * @ring_num: ring number
 */
static inline void hal_ce_dst_setup(struct hal_soc *hal, struct hal_srng *srng,
				    int ring_num)
{
	uint32_t reg_val = 0;
	uint32_t reg_addr;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, CE_DST);

	/* set DEST_MAX_LENGTH according to ce assignment */
	reg_addr = HAL_CE_CHANNEL_DST_DEST_CTRL_ADDR(
			ring_config->reg_start[R0_INDEX] +
			(ring_num * ring_config->reg_size[R0_INDEX]));

	reg_val = HAL_REG_READ(hal, reg_addr);
	reg_val &= ~HAL_CE_CHANNEL_DST_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	reg_val |= srng->u.dst_ring.max_buffer_length &
		   HAL_CE_CHANNEL_DST_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	HAL_REG_WRITE(hal, reg_addr, reg_val);

	if (srng->prefetch_timer) {
		reg_addr = HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_ADDR(
				ring_config->reg_start[R0_INDEX] +
				(ring_num * ring_config->reg_size[R0_INDEX]));

		reg_val = HAL_REG_READ(hal, reg_addr);
		reg_val &= ~HAL_CE_CHANNEL_DST_DEST_RING_CONSUMER_PREFETCH_TIMER_RMSK;
		reg_val |= srng->prefetch_timer;
		HAL_REG_WRITE(hal, reg_addr, reg_val);
		reg_val = HAL_REG_READ(hal, reg_addr);
	}
}

void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
				uint32_t *ix0, uint32_t *ix1,
				uint32_t *ix2, uint32_t *ix3)
{
	uint32_t reg_offset;
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;
	uint32_t reo_reg_base;

	reo_reg_base = hal_get_reo_reg_base_offset(hal_soc_hdl);

	if (read) {
		if (ix0) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(
						reo_reg_base);
			*ix0 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix1) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_1_ADDR(
						reo_reg_base);
			*ix1 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix2) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_2_ADDR(
						reo_reg_base);
			*ix2 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix3) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_3_ADDR(
						reo_reg_base);
			*ix3 = HAL_REG_READ(hal, reg_offset);
		}
	} else {
		if (ix0) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_0_ADDR(
						reo_reg_base);
			HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset,
						    *ix0, true);
		}

		if (ix1) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_1_ADDR(
						reo_reg_base);
			HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset,
						    *ix1, true);
		}

		if (ix2) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_2_ADDR(
						reo_reg_base);
			HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset,
						    *ix2, true);
		}

		if (ix3) {
			reg_offset =
				HAL_REO_DESTINATION_RING_CTRL_IX_3_ADDR(
						reo_reg_base);
			HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset,
						    *ix3, true);
		}
	}
}

qdf_export_symbol(hal_reo_read_write_ctrl_ix);

void hal_srng_dst_set_hp_paddr_confirm(struct hal_srng *srng, uint64_t paddr)
{
	SRNG_DST_REG_WRITE_CONFIRM(srng, HP_ADDR_LSB, paddr & 0xffffffff);
	SRNG_DST_REG_WRITE_CONFIRM(srng, HP_ADDR_MSB, paddr >> 32);
}

qdf_export_symbol(hal_srng_dst_set_hp_paddr_confirm);

void hal_srng_dst_init_hp(struct hal_soc_handle *hal_soc,
			  struct hal_srng *srng,
			  uint32_t *vaddr)
{
	uint32_t reg_offset;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (!srng)
		return;

	srng->u.dst_ring.hp_addr = vaddr;
	reg_offset = SRNG_DST_ADDR(srng, HP) - hal->dev_base_addr;
	HAL_REG_WRITE_CONFIRM_RETRY(
		hal, reg_offset, srng->u.dst_ring.cached_hp, true);

	if (vaddr) {
		*srng->u.dst_ring.hp_addr = srng->u.dst_ring.cached_hp;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "hp_addr=%pK, cached_hp=%d",
			  (void *)srng->u.dst_ring.hp_addr,
			  srng->u.dst_ring.cached_hp);
	}
}

qdf_export_symbol(hal_srng_dst_init_hp);

void hal_srng_dst_update_hp_addr(struct hal_soc_handle *hal_soc,
				 hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	int32_t hw_hp;
	int32_t hw_tp;

	if (!srng)
		return;

	if (srng->u.dst_ring.hp_addr) {
		hal_get_hw_hptp(hal_soc, hal_ring_hdl, &hw_hp, &hw_tp,
				WBM2SW_RELEASE);
		*srng->u.dst_ring.hp_addr = hw_hp;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "hw_hp=%d", hw_hp);
	}
}

qdf_export_symbol(hal_srng_dst_update_hp_addr);

/**
 * hal_srng_hw_init() - Private function to initialize SRNG HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 * @idle_check: Check if ring is idle
 * @idx: ring index
 */
static inline void hal_srng_hw_init(struct hal_soc *hal,
				    struct hal_srng *srng, bool idle_check,
				    uint32_t idx)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		hal_srng_src_hw_init(hal, srng, idle_check, idx);
	else
		hal_srng_dst_hw_init(hal, srng, idle_check, idx);
}

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
bool hal_srng_is_near_full_irq_supported(hal_soc_handle_t hal_soc,
					 int ring_type, int ring_num)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	return ring_config->nf_irq_support;
}

/**
 * hal_srng_set_msi2_params() - Set MSI2 params to SRNG data structure from
 *	ring params
 * @srng: SRNG handle
 * @ring_params: ring params for this SRNG
 *
 * Return: None
 */
static inline void
hal_srng_set_msi2_params(struct hal_srng *srng,
			 struct hal_srng_params *ring_params)
{
	srng->msi2_addr = ring_params->msi2_addr;
	srng->msi2_data = ring_params->msi2_data;
}

/**
 * hal_srng_get_nf_params() - Get the near full MSI2 params from srng
 * @srng: SRNG handle
 * @ring_params: ring params for this SRNG
 *
 * Return: None
 */
static inline void
hal_srng_get_nf_params(struct hal_srng *srng,
		       struct hal_srng_params *ring_params)
{
	ring_params->msi2_addr = srng->msi2_addr;
	ring_params->msi2_data = srng->msi2_data;
}

/**
 * hal_srng_set_nf_thresholds() - Set the near full thresholds in SRNG
 * @srng: SRNG handle where the params are to be set
 * @ring_params: ring params, from where threshold is to be fetched
 *
 * Return: None
 */
static inline void
hal_srng_set_nf_thresholds(struct hal_srng *srng,
			   struct hal_srng_params *ring_params)
{
	srng->u.dst_ring.nf_irq_support = ring_params->nf_irq_support;
	srng->u.dst_ring.high_thresh = ring_params->high_thresh;
}
#else
static inline void
hal_srng_set_msi2_params(struct hal_srng *srng,
			 struct hal_srng_params *ring_params)
{
}

static inline void
hal_srng_get_nf_params(struct hal_srng *srng,
		       struct hal_srng_params *ring_params)
{
}

static inline void
hal_srng_set_nf_thresholds(struct hal_srng *srng,
			   struct hal_srng_params *ring_params)
{
}
#endif

#if defined(CLEAR_SW2TCL_CONSUMED_DESC)
/**
 * hal_srng_last_desc_cleared_init() - Initialize SRNG last_desc_cleared ptr
 * @srng: Source ring pointer
 *
 * Return: None
 */
static inline
void hal_srng_last_desc_cleared_init(struct hal_srng *srng)
{
	srng->last_desc_cleared = srng->ring_size - srng->entry_size;
}

#else
static inline
void hal_srng_last_desc_cleared_init(struct hal_srng *srng)
{
}
#endif /* CLEAR_SW2TCL_CONSUMED_DESC */

#ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
static inline void hal_srng_update_high_wm_thresholds(struct hal_srng *srng)
{
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_90_to_100] =
		((srng->num_entries * 90) / 100);
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_80_to_90] =
		((srng->num_entries * 80) / 100);
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_70_to_80] =
		((srng->num_entries * 70) / 100);
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_60_to_70] =
		((srng->num_entries * 60) / 100);
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_50_to_60] =
		((srng->num_entries * 50) / 100);
	/* Below 50% threshold is not needed */
	srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_BELOW_50_PERCENT] = 0;

	hal_info("ring_id: %u, wm_thresh- <50:%u, 50-60:%u, 60-70:%u, 70-80:%u, 80-90:%u, 90-100:%u",
		 srng->ring_id,
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_BELOW_50_PERCENT],
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_50_to_60],
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_60_to_70],
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_70_to_80],
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_80_to_90],
		 srng->high_wm.bin_thresh[HAL_SRNG_HIGH_WM_BIN_90_to_100]);
}
#else
static inline void hal_srng_update_high_wm_thresholds(struct hal_srng *srng)
{
}
#endif

void *hal_srng_setup_idx(void *hal_soc, int ring_type, int ring_num, int mac_id,
			 struct hal_srng_params *ring_params, bool idle_check,
			 uint32_t idx)
{
	int ring_id;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	hal_soc_handle_t hal_hdl = (hal_soc_handle_t)hal;
	struct hal_srng *srng;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	void *dev_base_addr;
	int i;

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, mac_id);
	if (ring_id < 0)
		return NULL;

	hal_verbose_debug("mac_id %d ring_id %d", mac_id, ring_id);

	srng = hal_get_srng(hal_soc, ring_id);

	if (srng->initialized) {
		hal_verbose_debug("Ring (ring_type %d, ring_num %d) already initialized",
				  ring_type, ring_num);
		return NULL;
	}

	hal_srng_reg_his_init(srng);
	dev_base_addr = hal->dev_base_addr;
	srng->ring_id = ring_id;
	srng->ring_type = ring_type;
	srng->ring_dir = ring_config->ring_dir;
	srng->ring_base_paddr = ring_params->ring_base_paddr;
	srng->ring_base_vaddr = ring_params->ring_base_vaddr;
	srng->entry_size = ring_config->entry_size;
	srng->num_entries = ring_params->num_entries;
	srng->ring_size = srng->num_entries * srng->entry_size;
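	/* entry_size and hence ring_size are in units of 32-bit words;
	 * the wrap mask below is only meaningful when ring_size is a
	 * power of two.
	 */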
	srng->ring_size_mask = srng->ring_size - 1;
	srng->ring_vaddr_end = srng->ring_base_vaddr + srng->ring_size;
	srng->msi_addr = ring_params->msi_addr;
	srng->msi_data = ring_params->msi_data;
	srng->intr_timer_thres_us = ring_params->intr_timer_thres_us;
	srng->intr_batch_cntr_thres_entries =
		ring_params->intr_batch_cntr_thres_entries;
	srng->pointer_timer_threshold =
		ring_params->pointer_timer_threshold;
	srng->pointer_num_threshold =
		ring_params->pointer_num_threshold;

	if (!idle_check)
		srng->prefetch_timer = ring_params->prefetch_timer;
	srng->hal_soc = hal_soc;
	hal_srng_set_msi2_params(srng, ring_params);
	hal_srng_update_high_wm_thresholds(srng);

	for (i = 0; i < MAX_SRNG_REG_GROUPS; i++) {
		srng->hwreg_base[i] = dev_base_addr + ring_config->reg_start[i]
			+ (ring_num * ring_config->reg_size[i]);
	}

	/* Zero out the entire ring memory (the << 2 converts words to bytes) */
	qdf_mem_zero(srng->ring_base_vaddr, (srng->entry_size *
			srng->num_entries) << 2);

	srng->flags = ring_params->flags;

	/* For cached descriptors, flush and invalidate the memory */
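	/* Clean writes back the zeroed contents; invalidate ensures later
	 * reads fetch HW-updated descriptors from memory.
	 */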
	if (srng->flags & HAL_SRNG_CACHED_DESC) {
		qdf_nbuf_dma_clean_range(
				srng->ring_base_vaddr,
				srng->ring_base_vaddr +
				((srng->entry_size * srng->num_entries)));
		qdf_nbuf_dma_inv_range(
				srng->ring_base_vaddr,
				srng->ring_base_vaddr +
				((srng->entry_size * srng->num_entries)));
	}
#ifdef BIG_ENDIAN_HOST
	/* TODO: See if we should get these flags from caller */
	srng->flags |= HAL_SRNG_DATA_TLV_SWAP;
	srng->flags |= HAL_SRNG_MSI_SWAP;
	srng->flags |= HAL_SRNG_RING_PTR_SWAP;
#endif

	hal_srng_last_desc_cleared_init(srng);

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		srng->u.src_ring.hp = 0;
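		/* reap_hp starts at the last entry so that the first reap
		 * wraps around to entry 0
		 */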
		srng->u.src_ring.reap_hp = srng->ring_size -
			srng->entry_size;
		srng->u.src_ring.tp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		srng->u.src_ring.low_threshold =
			ring_params->low_threshold * srng->entry_size;

		if (srng->u.src_ring.tp_addr)
			qdf_mem_zero(srng->u.src_ring.tp_addr,
				     sizeof(*hal->shadow_rdptr_mem_vaddr));

		if (ring_config->lmac_ring) {
			/* For LMAC rings, head pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.src_ring.hp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;

			if (srng->u.src_ring.hp_addr)
				qdf_mem_zero(srng->u.src_ring.hp_addr,
					     sizeof(*hal->shadow_wrptr_mem_vaddr));

		} else if (ignore_shadow || !srng->u.src_ring.hp_addr) {
			srng->u.src_ring.hp_addr =
				hal_get_window_address(hal,
						       SRNG_SRC_ADDR(srng, HP));

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s: Ring (%d, %d) missing shadow config",
					  __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_SRC_ADDR(srng, HP),
						     srng->u.src_ring.hp_addr);
		}
	} else {
		/* During initialization the loop count in all descriptors
		 * will be set to zero; HW sets it to 1 on completing the
		 * descriptor update in the first loop and increments it by
		 * 1 on subsequent loops (the loop count wraps around after
		 * reaching 0xffff). The 'loop_cnt' in the SW ring state is
		 * the expected loop count in descriptors updated by HW (to
		 * be processed by SW).
		 */
		hal_srng_set_nf_thresholds(srng, ring_params);
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		srng->u.dst_ring.hp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);

		if (srng->u.dst_ring.hp_addr)
			qdf_mem_zero(srng->u.dst_ring.hp_addr,
				     sizeof(*hal->shadow_rdptr_mem_vaddr));

		if (ring_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.dst_ring.tp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;

			if (srng->u.dst_ring.tp_addr)
				qdf_mem_zero(srng->u.dst_ring.tp_addr,
					     sizeof(*hal->shadow_wrptr_mem_vaddr));

		} else if (ignore_shadow || !srng->u.dst_ring.tp_addr) {
			srng->u.dst_ring.tp_addr =
				hal_get_window_address(hal,
						       SRNG_DST_ADDR(srng, TP));

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s: Ring (%d, %d) missing shadow config",
					  __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_DST_ADDR(srng, TP),
						     srng->u.dst_ring.tp_addr);
		}
	}

	if (!(ring_config->lmac_ring)) {
		/*
		 * UMAC reset has idle check enabled.
		 * During UMAC reset, the Tx ring halt is set
		 * by the Wi-Fi FW during the pre-reset stage,
		 * so avoid setting the Tx ring halt again.
		 */
		if (idle_check && idx) {
			if (!hal->ops->hal_tx_ring_halt_get(hal_hdl)) {
				qdf_print("\nTx ring halt not set: Ring(%d, %d)",
					  ring_type, ring_num);
				qdf_assert_always(0);
			}
			hal_srng_hw_init(hal, srng, idle_check, idx);
			goto ce_setup;
		}

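		/*
		 * Halt the Tx ring, poll until HW acknowledges the halt,
		 * re-initialize the ring and then release the halt.
		 */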
		if (idx) {
			hal->ops->hal_tx_ring_halt_set(hal_hdl);
			do {
				hal_info("Waiting for ring reset");
			} while (!(hal->ops->hal_tx_ring_halt_poll(hal_hdl)));
		}
		hal_srng_hw_init(hal, srng, idle_check, idx);

		if (idx)
			hal->ops->hal_tx_ring_halt_reset(hal_hdl);

ce_setup:
		if (ring_type == CE_DST) {
			srng->u.dst_ring.max_buffer_length =
				ring_params->max_buffer_length;
			hal_ce_dst_setup(hal, srng, ring_num);
		}
	}

	SRNG_LOCK_INIT(&srng->lock);

	srng->srng_event = 0;

	srng->initialized = true;

	return (void *)srng;
}
qdf_export_symbol(hal_srng_setup_idx);

/**
 * hal_srng_setup() - Initialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of same type (starting
 *	      from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 * @idle_check: Check if ring is idle
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in the
 * hal_srng_params structure. The ring base address should be 8-byte aligned,
 * and the size of each ring entry should be queried using the API
 * hal_srng_get_entrysize().
 *
 * Return: Opaque pointer to ring on success
 *	   NULL on failure (if given ring is not available)
 */
void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
		     int mac_id, struct hal_srng_params *ring_params,
		     bool idle_check)
{
	return hal_srng_setup_idx(hal_soc, ring_type, ring_num, mac_id,
				  ring_params, idle_check, 0);
}
qdf_export_symbol(hal_srng_setup);
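
/*
 * Illustrative usage sketch (not taken from a real caller; 'osdev', 'dev'
 * and the ring sizing below are assumptions): callers typically allocate
 * coherent ring memory sized from hal_srng_get_entrysize() and pass the
 * addresses through hal_srng_params:
 *
 *	struct hal_srng_params params = {0};
 *	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, REO_DST);
 *	uint32_t num_entries = 1024;
 *	hal_ring_handle_t hdl;
 *
 *	params.ring_base_vaddr = qdf_mem_alloc_consistent(osdev, dev,
 *					num_entries * entry_size,
 *					&params.ring_base_paddr);
 *	params.num_entries = num_entries;
 *	hdl = hal_srng_setup(hal_soc, REO_DST, 0, 0, &params, false);
 */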

void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl,
		      bool umac_reset_inprogress)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	SRNG_LOCK_DESTROY(&srng->lock);
	srng->initialized = false;
	if (umac_reset_inprogress)
		hal_srng_hw_disable(hal_soc, srng);
}
qdf_export_symbol(hal_srng_cleanup);

uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
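
	/* entry_size is maintained in 4-byte words; convert to bytes */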
	return ring_config->entry_size << 2;
}
qdf_export_symbol(hal_srng_get_entrysize);

uint32_t hal_srng_max_entries(void *hal_soc, int ring_type)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	return ring_config->max_size / ring_config->entry_size;
}
qdf_export_symbol(hal_srng_max_entries);

enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	return ring_config->ring_dir;
}

void hal_srng_dump(struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		hal_debug("=== SRC RING %d ===", srng->ring_id);
		hal_debug("hp %u, reap_hp %u, tp %u, cached tp %u",
			  srng->u.src_ring.hp,
			  srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr,
			  srng->u.src_ring.cached_tp);
	} else {
		hal_debug("=== DST RING %d ===", srng->ring_id);
		hal_debug("tp %u, hp %u, cached hp %u, loop_cnt %u",
			  srng->u.dst_ring.tp,
			  *srng->u.dst_ring.hp_addr,
			  srng->u.dst_ring.cached_hp,
			  srng->u.dst_ring.loop_cnt);
	}
}

void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
			 hal_ring_handle_t hal_ring_hdl,
			 struct hal_srng_params *ring_params)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	int i = 0;

	ring_params->ring_id = srng->ring_id;
	ring_params->ring_dir = srng->ring_dir;
	ring_params->entry_size = srng->entry_size;

	ring_params->ring_base_paddr = srng->ring_base_paddr;
	ring_params->ring_base_vaddr = srng->ring_base_vaddr;
	ring_params->num_entries = srng->num_entries;
	ring_params->msi_addr = srng->msi_addr;
	ring_params->msi_data = srng->msi_data;
	ring_params->intr_timer_thres_us = srng->intr_timer_thres_us;
	ring_params->intr_batch_cntr_thres_entries =
		srng->intr_batch_cntr_thres_entries;
	ring_params->low_threshold = srng->u.src_ring.low_threshold;
	ring_params->flags = srng->flags;
	for (i = 0; i < MAX_SRNG_REG_GROUPS; i++)
		ring_params->hwreg_base[i] = srng->hwreg_base[i];

	hal_srng_get_nf_params(srng, ring_params);
}
qdf_export_symbol(hal_get_srng_params);

void hal_set_low_threshold(hal_ring_handle_t hal_ring_hdl,
			   uint32_t low_threshold)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
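
	/* low_threshold is maintained in 32-bit words, hence the scaling */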
	srng->u.src_ring.low_threshold = low_threshold * srng->entry_size;
}
qdf_export_symbol(hal_set_low_threshold);

#ifdef FEATURE_RUNTIME_PM
void
hal_srng_rtpm_access_end(hal_soc_handle_t hal_soc_hdl,
			 hal_ring_handle_t hal_ring_hdl,
			 uint32_t rtpm_id)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	if (qdf_unlikely(!hal_ring_hdl)) {
		qdf_print("Error: Invalid hal_ring\n");
		return;
	}

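	/*
	 * Complete the ring access only when the runtime get succeeds and
	 * the system is not suspending; otherwise reap the ring and flag
	 * it for a deferred flush.
	 */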
	if (hif_rtpm_get(HIF_RTPM_GET_ASYNC, rtpm_id) == 0) {
		if (hif_system_pm_state_check(hal_soc->hif_handle)) {
			hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
			hal_srng_inc_flush_cnt(hal_ring_hdl);
		} else {
			hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		}

		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, rtpm_id);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}
}

qdf_export_symbol(hal_srng_rtpm_access_end);
#endif /* FEATURE_RUNTIME_PM */

#ifdef FORCE_WAKE
void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc;

	hal_soc->init_phase = init_phase;
}
#endif /* FORCE_WAKE */