/*
 * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _OL_CFG__H_
#define _OL_CFG__H_

#include <qdf_types.h>		/* uint32_t */
#include <cdp_txrx_cmn.h>	/* ol_pdev_handle */
#include <cds_ieee80211_common.h>	/* ieee80211_qosframe_htc_addr4 */
#include <enet.h>		/* LLC_SNAP_HDR_LEN */
#if defined(CONFIG_HL_SUPPORT)
#include "wlan_tgt_def_config_hl.h"
#else
#include "wlan_tgt_def_config.h"
#endif
#include "ol_txrx_ctrl_api.h"	/* txrx_pdev_cfg_param_t */
#include <cdp_txrx_handle.h>
#include "qca_vendor.h"

/**
 * @brief format of data frames delivered to/from the WLAN driver by/to the OS
 */
enum wlan_frm_fmt {
	wlan_frm_fmt_unknown,
	wlan_frm_fmt_raw,
	wlan_frm_fmt_native_wifi,
	wlan_frm_fmt_802_3,
};

/* Max throughput */
#ifdef SLUB_MEM_OPTIMIZE
#define MAX_THROUGHPUT 400
#else
#define MAX_THROUGHPUT 800
#endif

/* Duty cycle values (percent) for the different throttle levels */
#define THROTTLE_DUTY_CYCLE_LEVEL0 (0)
#define THROTTLE_DUTY_CYCLE_LEVEL1 (50)
#define THROTTLE_DUTY_CYCLE_LEVEL2 (75)
#define THROTTLE_DUTY_CYCLE_LEVEL3 (94)

struct wlan_ipa_uc_rsc_t {
	u8 uc_offload_enabled;
	u32 tx_max_buf_cnt;
	u32 tx_buf_size;
	u32 rx_ind_ring_size;
	u32 tx_partition_base;
};

/* Config parameters for txrx_pdev */
struct txrx_pdev_cfg_t {
	u8 is_high_latency;
	u8 defrag_timeout_check;
	u8 rx_pn_check;
	u8 pn_rx_fwd_check;
	u8 host_addba;
	u8 tx_free_at_download;
	u8 rx_fwd_inter_bss;
	u32 max_thruput_mbps;
	u32 target_tx_credit;
	u32 vow_config;
	u32 tx_download_size;
	u32 max_peer_id;
	u32 max_vdev;
	u32 max_nbuf_frags;
	u32 throttle_period_ms;
	u8 dutycycle_level[THROTTLE_LEVEL_MAX];
	enum wlan_frm_fmt frame_type;
	u8 rx_fwd_disabled;
	u8 is_packet_log_enabled;
	u8 is_full_reorder_offload;
#ifdef WLAN_FEATURE_TSF_PLUS
	u8 is_ptp_rx_opt_enabled;
#endif
	struct wlan_ipa_uc_rsc_t ipa_uc_rsc;
	bool ip_tcp_udp_checksum_offload;
	bool p2p_ip_tcp_udp_checksum_offload;
	/* IP, TCP and UDP checksum offload for NAN mode */
	bool nan_tcp_udp_checksumoffload;
	bool enable_rxthread;
	bool ce_classify_enabled;
#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
	uint32_t tx_flow_stop_queue_th;
	uint32_t tx_flow_start_queue_offset;
#endif
	bool flow_steering_enabled;
	/*
	 * Track whether credit reporting through
	 * HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND is enabled or disabled.
	 * In Genoa (QCN7605), credits are reported only through
	 * HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND.
	 */
	u8 credit_update_enabled;
	struct ol_tx_sched_wrr_ac_specs_t ac_specs[QCA_WLAN_AC_ALL];
	bool gro_enable;
	bool tso_enable;
	bool lro_enable;
	bool sg_enable;
	uint32_t enable_data_stall_detection;
	bool enable_flow_steering;
	bool disable_intra_bss_fwd;
	/* IPA Micro controller data path offload TX buffer size */
	uint32_t uc_tx_buffer_size;
	/* IPA Micro controller data path offload RX indication ring count */
	uint32_t uc_rx_indication_ring_count;
	/* IPA Micro controller data path offload TX partition base */
	uint32_t uc_tx_partition_base;
	/* Flag to indicate whether the new HTT format is supported */
	bool new_htt_format_enabled;

#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
	/* enable the TCP delayed-ACK feature in the driver */
	bool del_ack_enable;
	/* timeout (in ms) if no further TCP ACK frames arrive */
	uint16_t del_ack_timer_value;
	/* the maximum number of replaced TCP ACK frames */
	uint16_t del_ack_pkt_count;
#endif

#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
	uint16_t bundle_timer_value;
	uint16_t bundle_size;
#endif
	uint8_t pktlog_buffer_size;
};

/**
 * ol_tx_set_flow_control_parameters() - set flow control parameters
 * @cfg_ctx: cfg context
 * @cfg_param: cfg parameters
 *
 * Return: none
 */
#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
void ol_tx_set_flow_control_parameters(struct cdp_cfg *cfg_ctx,
				       struct txrx_pdev_cfg_param_t *cfg_param);
#else
static inline
void ol_tx_set_flow_control_parameters(struct cdp_cfg *cfg_ctx,
				       struct txrx_pdev_cfg_param_t *cfg_param)
{
}
#endif

/**
 * ol_pdev_cfg_attach() - setup configuration parameters
 * @osdev: OS handle needed as an argument for some OS primitives
 * @pcfg_param: configuration parameters
 *
 * Allocates the configuration context that will be used across the data path.
 *
 * Return: the control device object
 */
struct cdp_cfg *ol_pdev_cfg_attach(qdf_device_t osdev, void *pcfg_param);
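
/*
 * Illustrative usage sketch (not part of this interface): the control path is
 * expected to fill a struct txrx_pdev_cfg_param_t from ini settings and attach
 * it once per physical device. The variable names and error handling below are
 * assumptions for illustration only.
 *
 *	struct txrx_pdev_cfg_param_t cfg_param = {0};	// populated from ini
 *	struct cdp_cfg *cfg_ctx;
 *
 *	cfg_ctx = ol_pdev_cfg_attach(osdev, (void *)&cfg_param);
 *	if (!cfg_ctx)
 *		return QDF_STATUS_E_FAILURE;	// assumed failure handling
 */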

/**
 * @brief Specify whether the system is high-latency or low-latency.
 * @details
 * Indicate whether the system is operating in high-latency (message
 * based, e.g. USB) mode or low-latency (memory-mapped, e.g. PCIe) mode.
 * Some chips support just one type of host / target interface.
 * Other chips support both LL and HL interfaces (e.g. PCIe and USB),
 * so the selection will be made based on which bus HW is present, or
 * which is preferred if both are present.
 *
 * @param pdev - handle to the physical device
 * @return 1 -> high-latency -OR- 0 -> low-latency
 */
int ol_cfg_is_high_latency(struct cdp_cfg *cfg_pdev);

/**
 * @brief Specify whether credit reporting through
 * HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND is enabled by default.
 * @details
 * In Genoa (QCN7605), credits are reported only through
 * HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND.
 *
 * @param pdev - handle to the physical device
 * @return 1 -> enabled -OR- 0 -> disabled
 */
int ol_cfg_is_credit_update_enabled(struct cdp_cfg *cfg_pdev);

/**
 * @brief Specify the range of peer IDs.
 * @details
 * Specify the maximum peer ID. This is the maximum number of peers,
 * minus one.
 * This is used by the host to determine the size of arrays indexed by
 * peer ID.
 *
 * @param pdev - handle to the physical device
 * @return maximum peer ID
 */
int ol_cfg_max_peer_id(struct cdp_cfg *cfg_pdev);

/**
 * @brief Specify the max number of virtual devices within a physical device.
 * @details
 * Specify how many virtual devices may exist within a physical device.
 *
 * @param pdev - handle to the physical device
 * @return maximum number of virtual devices
 */
int ol_cfg_max_vdevs(struct cdp_cfg *cfg_pdev);

/**
 * @brief Check whether host-side rx PN check is enabled or disabled.
 * @details
 * Choose whether to allocate rx PN state information and perform
 * rx PN checks (if applicable, based on security type) on the host.
 * If the rx PN check is specified to be done on the host, the host SW
 * will determine which peers are using a security type (e.g. CCMP) that
 * requires a PN check.
 *
 * @param pdev - handle to the physical device
 * @return 1 -> host performs rx PN check -OR- 0 -> no host-side rx PN check
 */
int ol_cfg_rx_pn_check(struct cdp_cfg *cfg_pdev);

/**
 * @brief Check whether host-side rx forwarding is enabled or disabled.
 * @details
 * Choose whether the host checks rx frames for rx -> tx forwarding.
 * For LL systems, this rx -> tx host-side forwarding check is typically
 * enabled.
 * For HL systems, the rx -> tx forwarding check is typically done on the
 * target. However, even in HL systems, the host-side rx -> tx forwarding
 * will typically be enabled, as a second-tier safety net in case the
 * target doesn't have enough memory to store all rx -> tx forwarded frames.
 *
 * @param pdev - handle to the physical device
 * @return 1 -> host does rx->tx forward -OR- 0 -> no host-side rx->tx forward
 */
int ol_cfg_rx_fwd_check(struct cdp_cfg *cfg_pdev);

/**
 * ol_set_cfg_rx_fwd_disabled - set rx fwd disable/enable
 *
 * @pdev - handle to the physical device
 * @disable_rx_fwd - 1 -> no rx->tx forward, 0 -> rx->tx forward
 *
 * Choose whether to forward rx frames to tx (where applicable) within the
 * WLAN driver, or to leave all forwarding up to the operating system.
 * Currently only intra-bss fwd is supported.
 *
 */
void ol_set_cfg_rx_fwd_disabled(struct cdp_cfg *ppdev, uint8_t disable_rx_fwd);

/**
 * ol_set_cfg_packet_log_enabled - Set packet log config in HTT
 * config based on CFG ini configuration
 *
 * @pdev - handle to the physical device
 * @val - 0 - disable, 1 - enable
 */
void ol_set_cfg_packet_log_enabled(struct cdp_cfg *ppdev, uint8_t val);

/**
 * @brief Check whether rx forwarding is enabled or disabled.
 * @details
 * Choose whether to forward rx frames to tx (where applicable) within the
 * WLAN driver, or to leave all forwarding up to the operating system.
 *
 * @param pdev - handle to the physical device
 * @return 1 -> no rx->tx forward -OR- 0 -> rx->tx forward (in host or target)
 */
int ol_cfg_rx_fwd_disabled(struct cdp_cfg *cfg_pdev);

/**
 * @brief Check whether to perform inter-BSS or intra-BSS rx->tx forwarding.
 * @details
 * Check whether data received by an AP on one virtual device destined
 * to a STA associated with a different virtual device within the same
 * physical device should be forwarded within the driver, or whether
 * forwarding should only be done within a virtual device.
 *
 * @param pdev - handle to the physical device
 * @return
 * 1 -> forward both within and between vdevs
 * -OR-
 * 0 -> forward only within a vdev
 */
int ol_cfg_rx_fwd_inter_bss(struct cdp_cfg *cfg_pdev);

/**
 * @brief Specify data frame format used by the OS.
 * @details
 * Specify what type of frame (802.3 or native WiFi) the host data SW
 * should expect from and provide to the OS shim.
 *
 * @param pdev - handle to the physical device
 * @return enumerated data frame format
 */
enum wlan_frm_fmt ol_cfg_frame_type(struct cdp_cfg *cfg_pdev);

/**
 * @brief Specify the peak throughput.
 * @details
 * Specify the peak throughput that a system is expected to support.
 * The data SW uses this configuration to help choose the size for its
 * tx descriptor pool and rx buffer ring.
 * The data SW assumes that the peak throughput applies to either rx or tx,
 * rather than having separate specs of the rx max throughput vs. the tx
 * max throughput.
 *
 * @param pdev - handle to the physical device
 * @return maximum supported throughput in Mbps (not MBps)
 */
int ol_cfg_max_thruput_mbps(struct cdp_cfg *cfg_pdev);

/**
 * @brief Specify the maximum number of fragments per tx network buffer.
 * @details
 * Specify the maximum number of fragments that a tx frame provided to
 * the WLAN driver by the OS may contain.
 * In LL systems, the host data SW uses this maximum fragment count to
 * determine how many elements to allocate in the fragmentation descriptor
 * it creates to specify to the tx MAC DMA where to locate the tx frame's
 * data.
 * This maximum fragments count is only for regular frames, not TSO frames,
 * since TSO frames are sent in segments with a limited number of fragments
 * per segment.
 *
 * @param pdev - handle to the physical device
 * @return maximum number of fragments that can occur in a regular tx frame
 */
int ol_cfg_netbuf_frags_max(struct cdp_cfg *cfg_pdev);

/**
 * @brief For HL systems, specify when to free tx frames.
 * @details
 * In LL systems, the host's tx frame is referenced by the MAC DMA, and
 * thus cannot be freed until the target indicates that it is finished
 * transmitting the frame.
 * In HL systems, the entire tx frame is downloaded to the target.
 * Consequently, the target has its own copy of the tx frame, and the
 * host can free the tx frame as soon as the download completes.
 * Alternatively, the HL host can keep the frame allocated until the
 * target explicitly tells the HL host it is done transmitting the frame.
 * This gives the target the option of discarding its copy of the tx
 * frame, and then later getting a new copy from the host.
 * This function tells the host whether it should retain its copy of the
 * transmit frames until the target explicitly indicates it is finished
 * transmitting them, or if it should free its copy as soon as the
 * tx frame is downloaded to the target.
 *
 * @param pdev - handle to the physical device
 * @return
 * 0 -> retain the tx frame until the target indicates it is done
 * transmitting the frame
 * -OR-
 * 1 -> free the tx frame as soon as the download completes
 */
int ol_cfg_tx_free_at_download(struct cdp_cfg *cfg_pdev);
void ol_cfg_set_tx_free_at_download(struct cdp_cfg *cfg_pdev);

/**
 * @brief Low water mark for target tx credit.
 * Tx completion handler is invoked to reap the buffers when the target tx
 * credit goes below Low Water Mark.
 */
#define OL_CFG_NUM_MSDU_REAP 512
#define ol_cfg_tx_credit_lwm(pdev) \
	((CFG_TGT_NUM_MSDU_DESC > OL_CFG_NUM_MSDU_REAP) ? \
	 (CFG_TGT_NUM_MSDU_DESC - OL_CFG_NUM_MSDU_REAP) : 0)
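
/*
 * Illustrative example (the descriptor count is an assumption, not a value
 * taken from any target configuration): if CFG_TGT_NUM_MSDU_DESC were 1024,
 * ol_cfg_tx_credit_lwm() would evaluate to 1024 - 512 = 512; for a
 * hypothetical target providing 512 or fewer MSDU descriptors it would
 * evaluate to 0.
 */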

/**
 * @brief In a HL system, specify the target initial credit count.
 * @details
 * The HL host tx data SW includes a module for determining which tx frames
 * to download to the target at a given time.
 * To make this judgement, the HL tx download scheduler has to know
 * how many buffers the HL target has available to hold tx frames.
 * Due to the possibility that a single target buffer pool can be shared
 * between rx and tx frames, the host may not be able to obtain a precise
 * specification of the tx buffer space available in the target, but it
 * uses the best estimate, as provided by this configuration function,
 * to determine how best to schedule the tx frame downloads.
 *
 * @param pdev - handle to the physical device
 * @return the number of tx buffers available in a HL target
 */
uint16_t ol_cfg_target_tx_credit(struct cdp_cfg *cfg_pdev);

/**
 * @brief Specify the LL tx MSDU header download size.
 * @details
 * In LL systems, determine how many bytes from a tx frame to download,
 * in order to provide the target FW's Descriptor Engine with enough of
 * the packet's payload to interpret what kind of traffic this is,
 * and who it is for.
 * This download size specification does not include the 802.3 / 802.11
 * frame encapsulation headers; it starts with the encapsulated IP packet
 * (or whatever ethertype is carried within the ethernet-ish frame).
 * The LL host data SW will determine how many bytes of the MSDU header to
 * download by adding this download size specification to the size of the
 * frame header format specified by the ol_cfg_frame_type configuration
 * function.
 *
 * @param pdev - handle to the physical device
 * @return the number of bytes beyond the 802.3 or native WiFi header to
 * download to the target for tx classification
 */
int ol_cfg_tx_download_size(struct cdp_cfg *cfg_pdev);
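
/*
 * Illustrative example (the numbers are assumptions): if ol_cfg_frame_type()
 * reports 802.3 frames (14-byte MAC header) and ol_cfg_tx_download_size()
 * returns 64, the LL host would download roughly 14 + 64 = 78 bytes of each
 * MSDU to the target for classification.
 */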

/**
 * @brief Specify where defrag timeout and duplicate detection is handled.
 * @details
 * Non-aggregate duplicate detection and timing out stale fragments
 * requires additional target memory. To reach max client
 * configurations (128+), non-aggregate duplicate detection and the
 * logic to time out stale fragments is moved to the host.
 *
 * @param pdev - handle to the physical device
 * @return
 * 0 -> target is responsible for non-aggregate duplicate detection and
 * timing out stale fragments.
 *
 * 1 -> host is responsible for non-aggregate duplicate detection and
 * timing out stale fragments.
 */
int ol_cfg_rx_host_defrag_timeout_duplicate_check(struct cdp_cfg *cfg_pdev);

/**
 * @brief Query for the period in ms used for throttling for
 * thermal mitigation.
 * @details
 * In LL systems, transmit data throttling is used for thermal
 * mitigation where data is paused and resumed during the
 * throttle period, i.e. the throttle period consists of an
 * "on" phase when transmit is allowed and an "off" phase when
 * transmit is suspended. This function returns the total
 * period used for throttling.
 *
 * @param pdev - handle to the physical device
 * @return the total throttle period in ms
 */
int ol_cfg_throttle_period_ms(struct cdp_cfg *cfg_pdev);

/**
 * @brief Query for the duty cycle in percent used for throttling for
 * thermal mitigation.
 *
 * @param pdev - handle to the physical device
 * @param level - throttle level
 * @return the duty cycle for the given level, in percent
 */
int ol_cfg_throttle_duty_cycle_level(struct cdp_cfg *cfg_pdev, int level);
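
/*
 * Illustrative example (numbers are assumptions): with a throttle period of
 * 100 ms and a duty cycle value of 75 for the current throttle level, roughly
 * 75 ms of each period falls in one phase and 25 ms in the other; which phase
 * is the "off" phase is determined by the thermal mitigation code that
 * consumes these values, not by this header.
 */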

/**
 * @brief Check whether full reorder offload is enabled or disabled by
 * the host.
 * @details
 * If the host does not support receive reorder (i.e. the
 * target performs full receive re-ordering) this will return
 * "enabled".
 *
 * @param pdev - handle to the physical device
 * @return 1 - enable, 0 - disable
 */
int ol_cfg_is_full_reorder_offload(struct cdp_cfg *cfg_pdev);

int ol_cfg_is_rx_thread_enabled(struct cdp_cfg *cfg_pdev);

#ifdef WLAN_FEATURE_TSF_PLUS
void ol_set_cfg_ptp_rx_opt_enabled(struct cdp_cfg *cfg_pdev, u_int8_t val);
u_int8_t ol_cfg_is_ptp_rx_opt_enabled(struct cdp_cfg *cfg_pdev);
#else
static inline void
ol_set_cfg_ptp_rx_opt_enabled(struct cdp_cfg *cfg_pdev, u_int8_t val)
{
}

static inline u_int8_t
ol_cfg_is_ptp_rx_opt_enabled(struct cdp_cfg *cfg_pdev)
{
	return 0;
}
#endif

/**
 * ol_cfg_is_ip_tcp_udp_checksum_offload_enabled() - check whether
 * ip_tcp_udp_checksum_offload is enabled or disabled
 * @cfg_pdev: handle to the physical device
 *
 * Return: 1 - enable, 0 - disable
 */
static inline
int ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->ip_tcp_udp_checksum_offload;
}

#if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
int ol_cfg_get_tx_flow_stop_queue_th(struct cdp_cfg *cfg_pdev);

int ol_cfg_get_tx_flow_start_queue_offset(struct cdp_cfg *cfg_pdev);
#endif
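
/*
 * Illustrative sketch (not part of this interface): a tx descriptor pool
 * implementation could derive its pause/resume watermarks from these two
 * values, for example
 *
 *	stop_pct = ol_cfg_get_tx_flow_stop_queue_th(cfg_pdev);
 *	start_pct = stop_pct + ol_cfg_get_tx_flow_start_queue_offset(cfg_pdev);
 *	pool->stop_th = (pool_size * stop_pct) / 100;
 *	pool->start_th = (pool_size * start_pct) / 100;
 *
 * The pool fields and the percentage interpretation above are assumptions
 * for illustration only.
 */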

bool ol_cfg_is_ce_classify_enabled(struct cdp_cfg *cfg_pdev);

enum wlan_target_fmt_translation_caps {
	wlan_frm_tran_cap_raw = 0x01,
	wlan_frm_tran_cap_native_wifi = 0x02,
	wlan_frm_tran_cap_8023 = 0x04,
};

/**
 * @brief Specify the maximum header size added by SW tx encapsulation
 * @details
 * This function returns the maximum size of the new L2 header, not the
 * difference between the new and old L2 headers.
 * Thus, this function returns the maximum 802.11 header size that the
 * tx SW may need to add to tx data frames.
 *
 * @param pdev - handle to the physical device
 */
static inline int ol_cfg_sw_encap_hdr_max_size(struct cdp_cfg *cfg_pdev)
{
	/*
	 * 24 byte basic 802.11 header
	 * + 6 byte 4th addr
	 * + 2 byte QoS control
	 * + 4 byte HT control
	 * + 8 byte LLC/SNAP
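	 * = 44 bytes in total, assuming the 802.11 header struct carries
	 *   no padding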
	 */
	return sizeof(struct ieee80211_qosframe_htc_addr4) + LLC_SNAP_HDR_LEN;
}

static inline uint8_t ol_cfg_tx_encap(struct cdp_cfg *cfg_pdev)
{
	/* tx encap done in HW */
	return 0;
}

static inline int ol_cfg_host_addba(struct cdp_cfg *cfg_pdev)
{
	/*
	 * ADDBA negotiation is handled by the target FW for Peregrine + Rome.
	 */
	return 0;
}

/**
 * @brief If the host SW's ADDBA negotiation fails, should it be retried?
 *
 * @param pdev - handle to the physical device
 */
static inline int ol_cfg_addba_retry(struct cdp_cfg *cfg_pdev)
{
	return 0; /* disabled for now */
}

/**
 * @brief How many frames to hold in a paused vdev's tx queue in LL systems
 */
static inline int ol_tx_cfg_max_tx_queue_depth_ll(struct cdp_cfg *cfg_pdev)
{
	/*
	 * Store up to 1500 frames for a paused vdev.
	 * For example, if the vdev is sending 300 Mbps of traffic, and the
	 * PHY is capable of 600 Mbps, then it will take 56 ms for the PHY to
	 * drain both the 700 frames that are queued initially, plus the next
	 * 700 frames that come in while the PHY is catching up.
	 * So in this example scenario, the PHY will remain fully utilized
	 * in a MCC system that has a channel-switching period of 56 ms or
	 * less.
	 * The 700-frame calculation above assumes the FW drains packets with
	 * no overhead. In practice, drain overhead slows the drain rate, and
	 * the channel period may be less than 56 ms, so in the worst case the
	 * host should store up to 1500 frames.
	 */
	return 1500;
}

/**
 * @brief Get packet log config from HTT config
 */
uint8_t ol_cfg_is_packet_log_enabled(struct cdp_cfg *cfg_pdev);

#ifdef IPA_OFFLOAD
/**
 * @brief Check whether IPA micro controller data path offload is enabled.
 * @details
 * This function returns whether the IPA micro controller data path
 * offload feature is enabled.
 *
 * @param pdev - handle to the physical device
 */
unsigned int ol_cfg_ipa_uc_offload_enabled(struct cdp_cfg *cfg_pdev);
/**
 * @brief IPA micro controller data path TX buffer size
 * @details
 * This function returns the IPA micro controller data path offload
 * TX buffer size which should be pre-allocated by the driver.
 * The default buffer size is 2K.
 *
 * @param pdev - handle to the physical device
 */
unsigned int ol_cfg_ipa_uc_tx_buf_size(struct cdp_cfg *cfg_pdev);
/**
 * @brief IPA micro controller data path TX buffer count
 * @details
 * This function returns the IPA micro controller data path offload
 * TX buffer count which should be pre-allocated by the driver.
 *
 * @param pdev - handle to the physical device
 */
unsigned int ol_cfg_ipa_uc_tx_max_buf_cnt(struct cdp_cfg *cfg_pdev);
/**
 * @brief IPA micro controller data path RX indication ring size
 * @details
 * This function returns the IPA micro controller data path offload
 * RX indication ring size, which the WLAN FW will notify to the IPA
 * micro controller.
 *
 * @param pdev - handle to the physical device
 */
unsigned int ol_cfg_ipa_uc_rx_ind_ring_size(struct cdp_cfg *cfg_pdev);
/**
 * @brief IPA micro controller data path TX partition base
 * @param pdev - handle to the physical device
 */
unsigned int ol_cfg_ipa_uc_tx_partition_base(struct cdp_cfg *cfg_pdev);
void ol_cfg_set_ipa_uc_tx_partition_base(struct cdp_cfg *cfg_pdev,
					 uint32_t value);
#else
static inline unsigned int ol_cfg_ipa_uc_offload_enabled(
	struct cdp_cfg *cfg_pdev)
{
	return 0;
}

static inline unsigned int ol_cfg_ipa_uc_tx_buf_size(
	struct cdp_cfg *cfg_pdev)
{
	return 0;
}

static inline unsigned int ol_cfg_ipa_uc_tx_max_buf_cnt(
	struct cdp_cfg *cfg_pdev)
{
	return 0;
}

static inline unsigned int ol_cfg_ipa_uc_rx_ind_ring_size(
	struct cdp_cfg *cfg_pdev)
{
	return 0;
}

static inline unsigned int ol_cfg_ipa_uc_tx_partition_base(
	struct cdp_cfg *cfg_pdev)
{
	return 0;
}

static inline void ol_cfg_set_ipa_uc_tx_partition_base(
	struct cdp_cfg *cfg_pdev, uint32_t value)
{
}
#endif /* IPA_OFFLOAD */

/**
 * ol_set_cfg_flow_steering - Set Rx flow steering config based on CFG ini
 * config.
 *
 * @pdev - handle to the physical device
 * @val - 0 - disable, 1 - enable
 *
 * Return: None
 */
static inline void ol_set_cfg_flow_steering(struct cdp_cfg *cfg_pdev,
					    uint8_t val)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	cfg->flow_steering_enabled = val;
}

/**
 * ol_cfg_is_flow_steering_enabled - Return Rx flow steering config.
 *
 * @pdev - handle to the physical device
 *
 * Return: the configured flow steering value.
 */
static inline uint8_t ol_cfg_is_flow_steering_enabled(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->flow_steering_enabled;
}

/**
 * ol_set_cfg_new_htt_format - Set whether FW supports new htt format
 *
 * @pdev - handle to the physical device
 * @val - true - supported, false - not supported
 *
 * Return: None
 */
static inline void
ol_set_cfg_new_htt_format(struct cdp_cfg *cfg_pdev, bool val)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	cfg->new_htt_format_enabled = val;
}

/**
 * ol_cfg_is_htt_new_format_enabled - Return whether FW supports new htt format
 *
 * @pdev - handle to the physical device
 *
 * Return: value of configured htt_new_format
 */
static inline bool
ol_cfg_is_htt_new_format_enabled(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return cfg->new_htt_format_enabled;
}

#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
/**
 * ol_cfg_get_del_ack_timer_value() - get delayed ack timer value
 * @cfg_pdev: pdev handle
 *
 * Return: timer value
 */
int ol_cfg_get_del_ack_timer_value(struct cdp_cfg *cfg_pdev);

/**
 * ol_cfg_get_del_ack_enable_value() - get delayed ack enable value
 * @cfg_pdev: pdev handle
 *
 * Return: enable/disable
 */
bool ol_cfg_get_del_ack_enable_value(struct cdp_cfg *cfg_pdev);

/**
 * ol_cfg_get_del_ack_count_value() - get delayed ack count value
 * @cfg_pdev: pdev handle
 *
 * Return: count value
 */
int ol_cfg_get_del_ack_count_value(struct cdp_cfg *cfg_pdev);

/**
 * ol_cfg_update_del_ack_params() - update delayed ack params
 * @cfg_ctx: cfg context
 * @cfg_param: parameters
 *
 * Return: none
 */
void ol_cfg_update_del_ack_params(struct txrx_pdev_cfg_t *cfg_ctx,
				  struct txrx_pdev_cfg_param_t *cfg_param);
#else
/**
 * ol_cfg_update_del_ack_params() - update delayed ack params
 * @cfg_ctx: cfg context
 * @cfg_param: parameters
 *
 * Return: none
 */
static inline
void ol_cfg_update_del_ack_params(struct txrx_pdev_cfg_t *cfg_ctx,
				  struct txrx_pdev_cfg_param_t *cfg_param)
{
}
#endif

#ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
int ol_cfg_get_bundle_timer_value(struct cdp_cfg *cfg_pdev);
int ol_cfg_get_bundle_size(struct cdp_cfg *cfg_pdev);
#endif

/**
 * ol_cfg_get_wrr_skip_weight() - Query for the param of wrr_skip_weight
 * @pdev: handle to the physical device.
 * @ac: access category; one of BE, BK, VI, VO
 *
 * Return: wrr_skip_weight for the specified ac.
 */
int ol_cfg_get_wrr_skip_weight(struct cdp_cfg *pdev, int ac);

/**
 * ol_cfg_get_credit_threshold() - Query for the param of credit_threshold
 * @pdev: handle to the physical device.
 * @ac: access category; one of BE, BK, VI, VO
 *
 * Return: credit_threshold for the specified ac.
 */
uint32_t ol_cfg_get_credit_threshold(struct cdp_cfg *pdev, int ac);

/**
 * ol_cfg_get_send_limit() - Query for the param of send_limit
 * @pdev: handle to the physical device.
 * @ac: access category; one of BE, BK, VI, VO
 *
 * Return: send_limit for the specified ac.
 */
uint16_t ol_cfg_get_send_limit(struct cdp_cfg *pdev, int ac);

/**
 * ol_cfg_get_credit_reserve() - Query for the param of credit_reserve
 * @pdev: handle to the physical device.
 * @ac: access category; one of BE, BK, VI, VO
 *
 * Return: credit_reserve for the specified ac.
 */
int ol_cfg_get_credit_reserve(struct cdp_cfg *pdev, int ac);

/**
 * ol_cfg_get_discard_weight() - Query for the param of discard_weight
 * @pdev: handle to the physical device.
 * @ac: access category; one of BE, BK, VI, VO
 *
 * Return: discard_weight for the specified ac.
 */
int ol_cfg_get_discard_weight(struct cdp_cfg *pdev, int ac);
#endif /* _OL_CFG__H_ */