1 /*
2 * Copyright (c) 2012-2021 The Linux Foundation. All rights reserved.
3 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for
6 * any purpose with or without fee is hereby granted, provided that the
7 * above copyright notice and this permission notice appear in all
8 * copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /**
21 * DOC: wlan_hdd_tx_rx.c
22 *
23 * Linux HDD Tx/RX APIs
24 */
25
26 /* denote that this file does not allow legacy hddLog */
27 #define HDD_DISALLOW_LEGACY_HDDLOG 1
28 #include "osif_sync.h"
29 #include <wlan_hdd_tx_rx.h>
30 #include <wlan_hdd_softap_tx_rx.h>
31 #include <wlan_hdd_napi.h>
32 #include <linux/netdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/etherdevice.h>
35 #include <linux/if_ether.h>
36 #include <linux/inetdevice.h>
37 #include <cds_sched.h>
38 #include <cds_utils.h>
39
40 #include <linux/wireless.h>
41 #include <net/cfg80211.h>
42 #include "sap_api.h"
43 #include "wlan_hdd_wmm.h"
44 #include <cdp_txrx_cmn.h>
45 #include <cdp_txrx_peer_ops.h>
46 #include <cdp_txrx_flow_ctrl_v2.h>
47 #include <cdp_txrx_misc.h>
48 #include "wlan_hdd_power.h"
49 #include "wlan_hdd_cfg80211.h"
50 #include <wlan_hdd_tsf.h>
51 #include <net/tcp.h>
52
53 #include <ol_defines.h>
54 #include "cfg_ucfg_api.h"
55 #include "target_type.h"
56 #include "wlan_hdd_object_manager.h"
57 #include <wlan_hdd_sar_limits.h>
58 #include "wlan_hdd_object_manager.h"
59 #include "wlan_dp_ucfg_api.h"
60 #include "os_if_dp.h"
61 #include "wlan_ipa_ucfg_api.h"
62 #include "wlan_hdd_stats.h"
63
64 #ifdef TX_MULTIQ_PER_AC
65 #if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
/*
 * Mapping Linux AC interpretation to SME AC.
 * Host has 4 queues per access category (4 AC) and 1 high priority queue.
 * 16 flow-controlled queues for regular traffic and one non-flow
 * controlled queue for high priority control traffic (EAPOL, DHCP).
 * The seventeenth queue is mapped to AC_VO to allow for proper prioritization.
 */
const uint8_t hdd_qdisc_ac_to_tl_ac[] = {
	SME_AC_VO,	/* queues 0-3: voice */
	SME_AC_VO,
	SME_AC_VO,
	SME_AC_VO,
	SME_AC_VI,	/* queues 4-7: video */
	SME_AC_VI,
	SME_AC_VI,
	SME_AC_VI,
	SME_AC_BE,	/* queues 8-11: best effort */
	SME_AC_BE,
	SME_AC_BE,
	SME_AC_BE,
	SME_AC_BK,	/* queues 12-15: background */
	SME_AC_BK,
	SME_AC_BK,
	SME_AC_BK,
	SME_AC_VO,	/* queue 16: high priority, non-flow-controlled */
};
92
93 #else
/*
 * Mapping Linux AC interpretation to SME AC (no tx flow control build):
 * four queues per access category and no extra high priority queue.
 */
const uint8_t hdd_qdisc_ac_to_tl_ac[] = {
	SME_AC_VO,	/* queues 0-3: voice */
	SME_AC_VO,
	SME_AC_VO,
	SME_AC_VO,
	SME_AC_VI,	/* queues 4-7: video */
	SME_AC_VI,
	SME_AC_VI,
	SME_AC_VI,
	SME_AC_BE,	/* queues 8-11: best effort */
	SME_AC_BE,
	SME_AC_BE,
	SME_AC_BE,
	SME_AC_BK,	/* queues 12-15: background */
	SME_AC_BK,
	SME_AC_BK,
	SME_AC_BK,
};
112
113 #endif
114 #else
115 #if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
/*
 * Mapping Linux AC interpretation to SME AC.
 * Host has 5 tx queues, 4 flow-controlled queues for regular traffic and
 * one non-flow-controlled queue for high priority control traffic (EAPOL,
 * DHCP).
 * The fifth queue is mapped to AC_VO to allow for proper prioritization.
 */
const uint8_t hdd_qdisc_ac_to_tl_ac[] = {
	SME_AC_VO,	/* queue 0: voice */
	SME_AC_VI,	/* queue 1: video */
	SME_AC_BE,	/* queue 2: best effort */
	SME_AC_BK,	/* queue 3: background */
	SME_AC_VO,	/* queue 4: high priority, non-flow-controlled */
};
129
130 #else
/*
 * Mapping Linux AC interpretation to SME AC (no tx flow control build):
 * one queue per access category, no extra high priority queue.
 */
const uint8_t hdd_qdisc_ac_to_tl_ac[] = {
	SME_AC_VO,	/* queue 0: voice */
	SME_AC_VI,	/* queue 1: video */
	SME_AC_BE,	/* queue 2: best effort */
	SME_AC_BK,	/* queue 3: background */
};
137
138 #endif
139 #endif
140
141 #ifdef QCA_HL_NETDEV_FLOW_CONTROL
/**
 * hdd_register_hl_netdev_fc_timer() - Register HL Flow Control Timer
 * @adapter: adapter handle
 * @timer_callback: timer expiry callback
 *
 * Initializes the adapter's tx flow control timer exactly once; repeat
 * calls on an already-initialized adapter are no-ops.
 *
 * Return: none
 */
void hdd_register_hl_netdev_fc_timer(struct hdd_adapter *adapter,
				     qdf_mc_timer_callback_t timer_callback)
{
	if (adapter->tx_flow_timer_initialized)
		return;

	qdf_mc_timer_init(&adapter->tx_flow_control_timer,
			  QDF_TIMER_TYPE_SW, timer_callback, adapter);
	adapter->tx_flow_timer_initialized = true;
}
151
152 /**
153 * hdd_deregister_hl_netdev_fc_timer() - Deregister HL Flow Control Timer
154 * @adapter: adapter handle
155 *
156 * Return: none
157 */
hdd_deregister_hl_netdev_fc_timer(struct hdd_adapter * adapter)158 void hdd_deregister_hl_netdev_fc_timer(struct hdd_adapter *adapter)
159 {
160 if (adapter->tx_flow_timer_initialized) {
161 qdf_mc_timer_stop(&adapter->tx_flow_control_timer);
162 qdf_mc_timer_destroy(&adapter->tx_flow_control_timer);
163 adapter->tx_flow_timer_initialized = false;
164 }
165 }
166
/**
 * hdd_tx_resume_timer_expired_handler() - TX Q resume timer handler
 * @adapter_context: pointer to vdev adapter
 *
 * Invoked when the flow-control timer expires: dumps the tx flow pool
 * info and the adapter's netif queue history for diagnostics, then
 * force-resumes any OS queues still paused for flow-control reasons so
 * a lost resume event cannot stall the interface permanently.
 *
 * Return: None
 */
void hdd_tx_resume_timer_expired_handler(void *adapter_context)
{
	struct hdd_adapter *adapter = (struct hdd_adapter *)adapter_context;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	u32 p_qpaused;
	u32 np_qpaused;

	if (!adapter) {
		hdd_err("invalid adapter context");
		return;
	}

	cdp_display_stats(soc, CDP_DUMP_TX_FLOW_POOL_INFO,
			  QDF_STATS_VERBOSITY_LEVEL_LOW);
	wlan_hdd_display_adapter_netif_queue_history(adapter);
	hdd_debug("Enabling queues");

	/* Snapshot the pause reasons under the lock; act on them outside */
	spin_lock_bh(&adapter->pause_map_lock);
	p_qpaused = adapter->pause_map & BIT(WLAN_DATA_FLOW_CONTROL_PRIORITY);
	np_qpaused = adapter->pause_map & BIT(WLAN_DATA_FLOW_CONTROL);
	spin_unlock_bh(&adapter->pause_map_lock);

	if (p_qpaused) {
		wlan_hdd_netif_queue_control(adapter,
					     WLAN_NETIF_PRIORITY_QUEUE_ON,
					     WLAN_DATA_FLOW_CONTROL_PRIORITY);
		/* Keep the datapath's view of the OS queue state in sync */
		cdp_hl_fc_set_os_queue_status(soc,
					      adapter->deflink->vdev_id,
					      WLAN_NETIF_PRIORITY_QUEUE_ON);
	}
	if (np_qpaused) {
		wlan_hdd_netif_queue_control(adapter,
					     WLAN_WAKE_NON_PRIORITY_QUEUE,
					     WLAN_DATA_FLOW_CONTROL);
		cdp_hl_fc_set_os_queue_status(soc,
					      adapter->deflink->vdev_id,
					      WLAN_WAKE_NON_PRIORITY_QUEUE);
	}
}
211
212 #endif /* QCA_HL_NETDEV_FLOW_CONTROL */
213
214 #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
215 /**
216 * hdd_tx_resume_timer_expired_handler() - TX Q resume timer handler
217 * @adapter_context: pointer to vdev adapter
218 *
219 * If Blocked OS Q is not resumed during timeout period, to prevent
220 * permanent stall, resume OS Q forcefully.
221 *
222 * Return: None
223 */
hdd_tx_resume_timer_expired_handler(void * adapter_context)224 void hdd_tx_resume_timer_expired_handler(void *adapter_context)
225 {
226 struct hdd_adapter *adapter = (struct hdd_adapter *) adapter_context;
227
228 if (!adapter) {
229 /* INVALID ARG */
230 return;
231 }
232
233 hdd_debug("Enabling queues");
234 wlan_hdd_netif_queue_control(adapter, WLAN_WAKE_ALL_NETIF_QUEUE,
235 WLAN_CONTROL_PATH);
236 }
237
238 /**
239 * hdd_tx_resume_false() - Resume OS TX Q false leads to queue disabling
240 * @adapter: pointer to hdd adapter
241 * @tx_resume: TX Q resume trigger
242 *
243 *
244 * Return: None
245 */
246 static void
hdd_tx_resume_false(struct hdd_adapter * adapter,bool tx_resume)247 hdd_tx_resume_false(struct hdd_adapter *adapter, bool tx_resume)
248 {
249 QDF_STATUS status;
250 qdf_mc_timer_t *fc_timer;
251
252 if (true == tx_resume)
253 return;
254
255 /* Pause TX */
256 hdd_debug("Disabling queues");
257 wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
258 WLAN_DATA_FLOW_CONTROL);
259
260 fc_timer = &adapter->tx_flow_control_timer;
261 if (QDF_TIMER_STATE_STOPPED != qdf_mc_timer_get_current_state(fc_timer))
262 goto update_stats;
263
264
265 status = qdf_mc_timer_start(fc_timer,
266 WLAN_HDD_TX_FLOW_CONTROL_OS_Q_BLOCK_TIME);
267
268 if (QDF_IS_STATUS_ERROR(status))
269 hdd_err("Failed to start tx_flow_control_timer");
270 else
271 adapter->deflink->hdd_stats.tx_rx_stats.txflow_timer_cnt++;
272
273 update_stats:
274 adapter->deflink->hdd_stats.tx_rx_stats.txflow_pause_cnt++;
275 adapter->deflink->hdd_stats.tx_rx_stats.is_txflow_paused = true;
276 }
277
278 /**
279 * hdd_tx_resume_cb() - Resume OS TX Q.
280 * @adapter_context: pointer to vdev apdapter
281 * @tx_resume: TX Q resume trigger
282 *
283 * Q was stopped due to WLAN TX path low resource condition
284 *
285 * Return: None
286 */
hdd_tx_resume_cb(void * adapter_context,bool tx_resume)287 void hdd_tx_resume_cb(void *adapter_context, bool tx_resume)
288 {
289 struct hdd_adapter *adapter = (struct hdd_adapter *) adapter_context;
290 struct hdd_station_ctx *hdd_sta_ctx = NULL;
291
292 if (!adapter) {
293 /* INVALID ARG */
294 return;
295 }
296
297 hdd_sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter->deflink);
298
299 /* Resume TX */
300 if (true == tx_resume) {
301 if (QDF_TIMER_STATE_STOPPED !=
302 qdf_mc_timer_get_current_state(&adapter->
303 tx_flow_control_timer)) {
304 qdf_mc_timer_stop(&adapter->tx_flow_control_timer);
305 }
306 hdd_debug("Enabling queues");
307 wlan_hdd_netif_queue_control(adapter,
308 WLAN_WAKE_ALL_NETIF_QUEUE,
309 WLAN_DATA_FLOW_CONTROL);
310 adapter->deflink->hdd_stats.tx_rx_stats.is_txflow_paused =
311 false;
312 adapter->deflink->hdd_stats.tx_rx_stats.txflow_unpause_cnt++;
313 }
314 hdd_tx_resume_false(adapter, tx_resume);
315 }
316
hdd_tx_flow_control_is_pause(void * adapter_context)317 bool hdd_tx_flow_control_is_pause(void *adapter_context)
318 {
319 struct hdd_adapter *adapter = (struct hdd_adapter *) adapter_context;
320
321 if ((!adapter) || (WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
322 /* INVALID ARG */
323 hdd_err("invalid adapter %pK", adapter);
324 return false;
325 }
326
327 return adapter->pause_map & (1 << WLAN_DATA_FLOW_CONTROL);
328 }
329
/**
 * hdd_register_tx_flow_control() - register tx flow control callbacks with DP
 * @adapter: adapter handle
 * @timer_callback: force-resume timer expiry callback
 * @flow_control_fp: pause/resume callback invoked by the datapath
 * @flow_control_is_pause_fp: query callback for current pause state
 *
 * Initializes the flow-control timer on first use, then registers the
 * callbacks against this adapter's vdev in the datapath.
 *
 * Return: none
 */
void hdd_register_tx_flow_control(struct hdd_adapter *adapter,
		qdf_mc_timer_callback_t timer_callback,
		ol_txrx_tx_flow_control_fp flow_control_fp,
		ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause_fp)
{
	if (!adapter->tx_flow_timer_initialized) {
		qdf_mc_timer_init(&adapter->tx_flow_control_timer,
				  QDF_TIMER_TYPE_SW, timer_callback, adapter);
		adapter->tx_flow_timer_initialized = true;
	}

	cdp_fc_register(cds_get_context(QDF_MODULE_ID_SOC),
			adapter->deflink->vdev_id, flow_control_fp, adapter,
			flow_control_is_pause_fp);
}
346
347 /**
348 * hdd_deregister_tx_flow_control() - Deregister TX Flow control
349 * @adapter: adapter handle
350 *
351 * Return: none
352 */
hdd_deregister_tx_flow_control(struct hdd_adapter * adapter)353 void hdd_deregister_tx_flow_control(struct hdd_adapter *adapter)
354 {
355 cdp_fc_deregister(cds_get_context(QDF_MODULE_ID_SOC),
356 adapter->deflink->vdev_id);
357 if (adapter->tx_flow_timer_initialized == true) {
358 qdf_mc_timer_stop(&adapter->tx_flow_control_timer);
359 qdf_mc_timer_destroy(&adapter->tx_flow_control_timer);
360 adapter->tx_flow_timer_initialized = false;
361 }
362 }
363
/**
 * hdd_get_tx_resource() - check tx resources and pause queues when scarce
 * @vdev_id: vdev id identifying the link to check
 * @mac_addr: peer MAC address used for the per-peer resource query
 *
 * Queries the datapath for tx resource availability against this
 * adapter's watermarks. When resources are low, stops all netif queues
 * and arms the flow-control timer so the queues are force-resumed if
 * resources never recover.
 *
 * Return: None
 */
void hdd_get_tx_resource(uint8_t vdev_id,
			 struct qdf_mac_addr *mac_addr)
{
	struct hdd_adapter *adapter;
	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
	uint16_t timer_value = WLAN_HDD_TX_FLOW_CONTROL_OS_Q_BLOCK_TIME;
	struct wlan_hdd_link_info *link_info;
	qdf_mc_timer_t *fc_timer;

	link_info = hdd_get_link_info_by_vdev(hdd_ctx, vdev_id);
	if (!link_info)
		return;

	adapter = link_info->adapter;
	/* SAP/P2P-GO use a dedicated OS queue block time */
	if (adapter->device_mode == QDF_P2P_GO_MODE ||
	    adapter->device_mode == QDF_SAP_MODE)
		timer_value = WLAN_SAP_HDD_TX_FLOW_CONTROL_OS_Q_BLOCK_TIME;

	/*
	 * A non-zero return here skips the pause path, i.e. presumably the
	 * datapath reports sufficient tx resources — confirm against the
	 * cdp_fc_get_tx_resource() contract.
	 */
	if (cdp_fc_get_tx_resource(cds_get_context(QDF_MODULE_ID_SOC),
				   OL_TXRX_PDEV_ID, *mac_addr,
				   adapter->tx_flow_low_watermark,
				   adapter->tx_flow_hi_watermark_offset))
		return;

	hdd_debug("Disabling queues lwm %d hwm offset %d",
		  adapter->tx_flow_low_watermark,
		  adapter->tx_flow_hi_watermark_offset);
	wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
				     WLAN_DATA_FLOW_CONTROL);

	/* Arm the force-resume timer only if initialized and not running */
	fc_timer = &adapter->tx_flow_control_timer;
	if ((adapter->tx_flow_timer_initialized == true) &&
	    (QDF_TIMER_STATE_STOPPED ==
	     qdf_mc_timer_get_current_state(fc_timer))) {
		qdf_mc_timer_start(fc_timer, timer_value);
		link_info->hdd_stats.tx_rx_stats.txflow_timer_cnt++;
		link_info->hdd_stats.tx_rx_stats.txflow_pause_cnt++;
		link_info->hdd_stats.tx_rx_stats.is_txflow_paused = true;
	}
}
404
405 unsigned int
hdd_get_tx_flow_low_watermark(hdd_cb_handle cb_ctx,qdf_netdev_t netdev)406 hdd_get_tx_flow_low_watermark(hdd_cb_handle cb_ctx, qdf_netdev_t netdev)
407 {
408 struct hdd_adapter *adapter;
409
410 adapter = WLAN_HDD_GET_PRIV_PTR(netdev);
411 if (!adapter)
412 return 0;
413
414 return adapter->tx_flow_low_watermark;
415 }
416 #endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
417
418 #ifdef RECEIVE_OFFLOAD
419 qdf_napi_struct
hdd_legacy_gro_get_napi(qdf_nbuf_t nbuf,bool enable_rxthread)420 *hdd_legacy_gro_get_napi(qdf_nbuf_t nbuf, bool enable_rxthread)
421 {
422 struct qca_napi_info *qca_napii;
423 struct qca_napi_data *napid;
424 struct napi_struct *napi_to_use;
425
426 napid = hdd_napi_get_all();
427 if (unlikely(!napid))
428 return NULL;
429
430 qca_napii = hif_get_napi(QDF_NBUF_CB_RX_CTX_ID(nbuf), napid);
431 if (unlikely(!qca_napii))
432 return NULL;
433
434 /*
435 * As we are breaking context in Rxthread mode, there is rx_thread NAPI
436 * corresponds each hif_napi.
437 */
438 if (enable_rxthread)
439 napi_to_use = &qca_napii->rx_thread_napi;
440 else
441 napi_to_use = &qca_napii->napi;
442
443 return (qdf_napi_struct *)napi_to_use;
444 }
445 #else
qdf_napi_struct
*hdd_legacy_gro_get_napi(qdf_nbuf_t nbuf, bool enable_rxthread)
{
	/* Receive offload not compiled in: no NAPI instance to offer */
	return NULL;
}
451 #endif
452
/**
 * hdd_set_udp_qos_upgrade_config() - configure the UDP QoS upgrade threshold
 * @adapter: adapter on which the threshold is set
 * @priority: access category threshold; must be below QCA_WLAN_AC_ALL
 *
 * Only supported in STA mode.
 *
 * Return: 0 on success; -EINVAL for a non-STA adapter or invalid priority
 */
int hdd_set_udp_qos_upgrade_config(struct hdd_adapter *adapter,
				   uint8_t priority)
{
	if (adapter->device_mode != QDF_STA_MODE) {
		hdd_info_rl("Data priority upgrade only allowed in STA mode:%d",
			    adapter->device_mode);
		return -EINVAL;
	}

	if (priority >= QCA_WLAN_AC_ALL) {
		hdd_err_rl("Invalid data priority: %d", priority);
		return -EINVAL;
	}

	adapter->upgrade_udp_qos_threshold = priority;

	hdd_debug("UDP packets qos upgrade to: %d", priority);

	return 0;
}
473
474 #ifdef QCA_WIFI_FTM
475 static inline bool
hdd_drop_tx_packet_on_ftm(struct sk_buff * skb)476 hdd_drop_tx_packet_on_ftm(struct sk_buff *skb)
477 {
478 if (hdd_get_conparam() == QDF_GLOBAL_FTM_MODE) {
479 kfree_skb(skb);
480 return true;
481 }
482 return false;
483 }
484 #else
static inline bool
hdd_drop_tx_packet_on_ftm(struct sk_buff *skb)
{
	/* FTM support not compiled in: never drop tx packets here */
	return false;
}
490 #endif
491
/**
 * __hdd_hard_start_xmit() - Transmit a frame
 * @skb: pointer to OS packet (sk_buff)
 * @dev: pointer to network device
 *
 * Function registered with the Linux OS for transmitting
 * packets. This version of the function directly passes
 * the packet to Transport Layer.
 * In case of any packet drop or error, log the error with
 * INFO HIGH/LOW/MEDIUM to avoid excessive logging in kmsg.
 *
 * Return: None
 */
static void __hdd_hard_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
	struct hdd_tx_rx_stats *stats =
				&adapter->deflink->hdd_stats.tx_rx_stats;
	struct hdd_station_ctx *sta_ctx = &adapter->deflink->session.station;
	int cpu = qdf_get_smp_processor_id();
	bool granted;
	sme_ac_enum_type ac;
	enum sme_qos_wmmuptype up;
	QDF_STATUS status;

	/* In FTM mode the frame is freed and dropped immediately */
	if (hdd_drop_tx_packet_on_ftm(skb))
		return;

	osif_dp_mark_pkt_type(skb);
	hdd_tx_latency_record_ingress_ts(adapter, skb);

	/* Get TL AC corresponding to Qdisc queue index/AC. */
	ac = hdd_qdisc_ac_to_tl_ac[skb->queue_mapping];

	/*
	 * user priority from IP header, which is already extracted and set from
	 * select_queue call back function
	 */
	up = skb->priority;

	++stats->per_cpu[cpu].tx_classified_ac[ac];
#ifdef HDD_WMM_DEBUG
	QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_DEBUG,
		  "%s: Classified as ac %d up %d", __func__, ac, up);
#endif /* HDD_WMM_DEBUG */

	if (HDD_PSB_CHANGED == adapter->psb_changed) {
		/*
		 * Function which will determine acquire admittance for a
		 * WMM AC is required or not based on psb configuration done
		 * in the framework
		 */
		hdd_wmm_acquire_access_required(adapter, ac);
	}
	/*
	 * Make sure we already have access to this access category
	 * or it is EAPOL or WAPI frame during initial authentication which
	 * can have artificially boosted higher qos priority.
	 */

	if (((adapter->psb_changed & (1 << ac)) &&
	     likely(adapter->hdd_wmm_status.ac_status[ac].
			is_access_allowed)) ||
	    ((!sta_ctx->conn_info.is_authenticated) &&
	     (QDF_NBUF_CB_PACKET_TYPE_EAPOL ==
			QDF_NBUF_CB_GET_PACKET_TYPE(skb) ||
	      QDF_NBUF_CB_PACKET_TYPE_WAPI ==
			QDF_NBUF_CB_GET_PACKET_TYPE(skb)))) {
		granted = true;
	} else {
		/*
		 * NOTE(review): the QDF_STATUS returned here is ignored;
		 * only the 'granted' out-parameter is consulted below —
		 * assumes hdd_wmm_acquire_access() always sets it. Confirm.
		 */
		status = hdd_wmm_acquire_access(adapter, ac, &granted);
		adapter->psb_changed |= (1 << ac);
	}

	if (!granted) {
		bool is_default_ac = false;
		/*
		 * ADDTS request for this AC is sent, for now
		 * send this packet through next available lower
		 * Access category until ADDTS negotiation completes.
		 */
		while (!likely
			       (adapter->hdd_wmm_status.ac_status[ac].
				is_access_allowed)) {
			/* Step down one AC per iteration; BK is the floor */
			switch (ac) {
			case SME_AC_VO:
				ac = SME_AC_VI;
				up = SME_QOS_WMM_UP_VI;
				break;
			case SME_AC_VI:
				ac = SME_AC_BE;
				up = SME_QOS_WMM_UP_BE;
				break;
			case SME_AC_BE:
				ac = SME_AC_BK;
				up = SME_QOS_WMM_UP_BK;
				break;
			default:
				ac = SME_AC_BK;
				up = SME_QOS_WMM_UP_BK;
				is_default_ac = true;
				break;
			}
			if (is_default_ac)
				break;
		}
		/* Re-tag the skb with the downgraded priority and queue */
		skb->priority = up;
		skb->queue_mapping = hdd_linux_up_to_ac_map[up];
	}

	/*
	 * vdev in link_info is directly dereferenced because this is per
	 * packet path, hdd_get_vdev_by_user() usage will be very costly
	 * as it involves lock access.
	 * Expectation here is vdev will be present during TX/RX processing
	 * and also DP internally maintaining vdev ref count
	 */
	status = ucfg_dp_start_xmit((qdf_nbuf_t)skb, adapter->deflink->vdev);
	if (QDF_IS_STATUS_SUCCESS(status)) {
		netif_trans_update(dev);
		wlan_hdd_sar_unsolicited_timer_start(adapter->hdd_ctx);
	} else {
		++stats->per_cpu[cpu].tx_dropped_ac[ac];
	}
}
618
/**
 * hdd_hard_start_xmit() - Wrapper function to protect
 * __hdd_hard_start_xmit from SSR
 * @skb: pointer to OS packet
 * @net_dev: pointer to net_device structure
 *
 * Function called by OS if any packet needs to transmit.
 * Registered as the netdev ndo_start_xmit handler.
 *
 * Return: Always returns NETDEV_TX_OK
 */
netdev_tx_t hdd_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
	__hdd_hard_start_xmit(skb, net_dev);

	return NETDEV_TX_OK;
}
635
/**
 * __hdd_tx_timeout() - TX timeout handler
 * @dev: pointer to network device
 *
 * This function is registered as a netdev ndo_tx_timeout method, and
 * is invoked by the kernel if the driver takes too long to transmit a
 * frame. Dumps per-queue and carrier state for diagnostics, then hands
 * recovery off to the datapath.
 *
 * Return: None
 */
static void __hdd_tx_timeout(struct net_device *dev)
{
	struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
	struct hdd_context *hdd_ctx;
	struct netdev_queue *txq;
	struct wlan_objmgr_vdev *vdev;
	int i = 0;

	hdd_ctx = WLAN_HDD_GET_CTX(adapter);

	/* Queues being stopped while suspended is expected, not a stall */
	if (hdd_ctx->hdd_wlan_suspended) {
		hdd_debug("Device is suspended, ignore WD timeout");
		return;
	}

	TX_TIMEOUT_TRACE(dev, QDF_MODULE_ID_HDD_DATA);
	DPTRACE(qdf_dp_trace(NULL, QDF_DP_TRACE_HDD_TX_TIMEOUT,
			     QDF_TRACE_DEFAULT_PDEV_ID,
			     NULL, 0, QDF_TX));

	/* Getting here implies we disabled the TX queues for too
	 * long. Queues are disabled either because of disassociation
	 * or low resource scenarios. In case of disassociation it is
	 * ok to ignore this. But if associated, we have do possible
	 * recovery here
	 */

	/* Dump per-queue stop state and last transmit time for debugging */
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		txq = netdev_get_tx_queue(dev, i);
		hdd_debug("Queue: %d status: %d txq->trans_start: %lu",
			  i, netif_tx_queue_stopped(txq), txq->trans_start);
	}

	hdd_debug("carrier state: %d", netif_carrier_ok(dev));

	wlan_hdd_display_adapter_netif_queue_history(adapter);

	/* Let the datapath perform its own tx-timeout handling */
	vdev = hdd_objmgr_get_vdev_by_user(adapter->deflink, WLAN_DP_ID);
	if (vdev) {
		ucfg_dp_tx_timeout(vdev);
		hdd_objmgr_put_vdev_by_user(vdev, WLAN_DP_ID);
	}
}
689
/**
 * hdd_tx_timeout() - SSR-protected wrapper around __hdd_tx_timeout()
 * @net_dev: pointer to net_device structure
 * @txqueue: stalled tx queue index (kernels >= 5.6 only; unused here)
 *
 * Return: none
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0))
void hdd_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
#else
void hdd_tx_timeout(struct net_device *net_dev)
#endif
{
	struct osif_vdev_sync *vdev_sync;

	/* Skip the handler entirely if a driver transition (SSR) is active */
	if (osif_vdev_sync_op_start(net_dev, &vdev_sync))
		return;

	__hdd_tx_timeout(net_dev);

	osif_vdev_sync_op_stop(vdev_sync);
}
705
706 #ifdef RECEIVE_OFFLOAD
/**
 * hdd_disable_rx_ol_in_concurrency() - toggle rx offload for concurrency
 * @disable: true to disable rx offload, false to re-enable
 *
 * Return: none
 */
void hdd_disable_rx_ol_in_concurrency(bool disable)
{
	struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);

	if (hdd_ctx)
		ucfg_dp_rx_handle_concurrency(hdd_ctx->psoc, disable);
}
716 #else /* RECEIVE_OFFLOAD */
void hdd_disable_rx_ol_in_concurrency(bool disable)
{
	/* Receive offload not compiled in: nothing to disable */
}
720 #endif /* RECEIVE_OFFLOAD */
721
722 #ifdef WLAN_FEATURE_TSF_PLUS_SOCK_TS
/**
 * hdd_tsf_timestamp_rx() - apply TSF timestamping to an rx buffer
 * @ctx: HDD callback handle
 * @netbuf: rx network buffer carrying the kernel timestamp
 *
 * Return: none
 */
void hdd_tsf_timestamp_rx(hdd_cb_handle ctx, qdf_nbuf_t netbuf)
{
	struct hdd_context *hdd_ctx = hdd_cb_handle_to_context(ctx);

	/* Only timestamp when TSF rx capture is enabled */
	if (hdd_tsf_is_rx_set(hdd_ctx))
		hdd_rx_timestamp(netbuf, ktime_to_us(netbuf->tstamp));
}
732
/**
 * hdd_get_tsf_time_cb() - convert a host time to TSF time for a netdev
 * @netdev: net device whose adapter performs the conversion
 * @input_time: host time to convert
 * @tsf_time: output buffer for the converted TSF time
 *
 * Return: none
 */
void hdd_get_tsf_time_cb(qdf_netdev_t netdev, uint64_t input_time,
			 uint64_t *tsf_time)
{
	struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(netdev);

	if (adapter)
		hdd_get_tsf_time(adapter, input_time, tsf_time);
}
744 #endif
745
/**
 * hdd_reason_type_to_string() - return string conversion of reason type
 * @reason: reason type
 *
 * This utility function helps log string conversion of reason type.
 *
 * Return: string conversion of reason type, if match found;
 *	   "Invalid" otherwise.
 */
const char *hdd_reason_type_to_string(enum netif_reason_type reason)
{
	switch (reason) {
	CASE_RETURN_STRING(WLAN_CONTROL_PATH);
	CASE_RETURN_STRING(WLAN_DATA_FLOW_CONTROL);
	CASE_RETURN_STRING(WLAN_FW_PAUSE);
	CASE_RETURN_STRING(WLAN_TX_ABORT);
	CASE_RETURN_STRING(WLAN_VDEV_STOP);
	CASE_RETURN_STRING(WLAN_PEER_UNAUTHORISED);
	CASE_RETURN_STRING(WLAN_THERMAL_MITIGATION);
	CASE_RETURN_STRING(WLAN_DATA_FLOW_CONTROL_PRIORITY);
	default:
		return "Invalid";
	}
}
770
/**
 * hdd_action_type_to_string() - return string conversion of action type
 * @action: action type
 *
 * This utility function helps log string conversion of action_type.
 *
 * Return: string conversion of action type, if match found;
 *	   "Invalid" otherwise.
 */
const char *hdd_action_type_to_string(enum netif_action_type action)
{

	switch (action) {
	CASE_RETURN_STRING(WLAN_STOP_ALL_NETIF_QUEUE);
	CASE_RETURN_STRING(WLAN_START_ALL_NETIF_QUEUE);
	CASE_RETURN_STRING(WLAN_WAKE_ALL_NETIF_QUEUE);
	CASE_RETURN_STRING(WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER);
	CASE_RETURN_STRING(WLAN_START_ALL_NETIF_QUEUE_N_CARRIER);
	CASE_RETURN_STRING(WLAN_NETIF_TX_DISABLE);
	CASE_RETURN_STRING(WLAN_NETIF_TX_DISABLE_N_CARRIER);
	CASE_RETURN_STRING(WLAN_NETIF_CARRIER_ON);
	CASE_RETURN_STRING(WLAN_NETIF_CARRIER_OFF);
	CASE_RETURN_STRING(WLAN_NETIF_PRIORITY_QUEUE_ON);
	CASE_RETURN_STRING(WLAN_NETIF_PRIORITY_QUEUE_OFF);
	CASE_RETURN_STRING(WLAN_NETIF_VO_QUEUE_ON);
	CASE_RETURN_STRING(WLAN_NETIF_VO_QUEUE_OFF);
	CASE_RETURN_STRING(WLAN_NETIF_VI_QUEUE_ON);
	CASE_RETURN_STRING(WLAN_NETIF_VI_QUEUE_OFF);
	CASE_RETURN_STRING(WLAN_NETIF_BE_BK_QUEUE_ON);
	CASE_RETURN_STRING(WLAN_NETIF_BE_BK_QUEUE_OFF);
	CASE_RETURN_STRING(WLAN_WAKE_NON_PRIORITY_QUEUE);
	CASE_RETURN_STRING(WLAN_STOP_NON_PRIORITY_QUEUE);
	default:
		return "Invalid";
	}
}
807
/**
 * wlan_hdd_update_queue_oper_stats - update queue operation statistics
 * @adapter: adapter handle
 * @action: action type
 * @reason: reason type
 *
 * Counts each queue stop action as a pause and each start/wake action as
 * an unpause against the triggering reason. Other actions (carrier
 * on/off, tx disable) are not counted.
 */
static void wlan_hdd_update_queue_oper_stats(struct hdd_adapter *adapter,
	enum netif_action_type action, enum netif_reason_type reason)
{
	switch (action) {
	/* Every variant that stops one or more queues counts as a pause */
	case WLAN_STOP_ALL_NETIF_QUEUE:
	case WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER:
	case WLAN_NETIF_BE_BK_QUEUE_OFF:
	case WLAN_NETIF_VI_QUEUE_OFF:
	case WLAN_NETIF_VO_QUEUE_OFF:
	case WLAN_NETIF_PRIORITY_QUEUE_OFF:
	case WLAN_STOP_NON_PRIORITY_QUEUE:
		adapter->queue_oper_stats[reason].pause_count++;
		break;
	/* Every variant that starts/wakes queues counts as an unpause */
	case WLAN_START_ALL_NETIF_QUEUE:
	case WLAN_WAKE_ALL_NETIF_QUEUE:
	case WLAN_START_ALL_NETIF_QUEUE_N_CARRIER:
	case WLAN_NETIF_BE_BK_QUEUE_ON:
	case WLAN_NETIF_VI_QUEUE_ON:
	case WLAN_NETIF_VO_QUEUE_ON:
	case WLAN_NETIF_PRIORITY_QUEUE_ON:
	case WLAN_WAKE_NON_PRIORITY_QUEUE:
		adapter->queue_oper_stats[reason].unpause_count++;
		break;
	default:
		break;
	}
}
841
/**
 * hdd_netdev_queue_is_locked()
 * @txq: net device tx queue
 *
 * For SMP system, always return false and we could safely rely on
 * __netif_tx_trylock().
 *
 * Return: true locked; false not locked
 */
#ifdef QCA_CONFIG_SMP
static inline bool hdd_netdev_queue_is_locked(struct netdev_queue *txq)
{
	return false;
}
#else
static inline bool hdd_netdev_queue_is_locked(struct netdev_queue *txq)
{
	/* On UP, a valid owner (!= -1) means the xmit lock is already held */
	return txq->xmit_lock_owner != -1;
}
#endif
862
/**
 * wlan_hdd_update_txq_timestamp() - update txq timestamp
 * @dev: net device
 *
 * Refreshes the transmit timestamp of every tx queue so the kernel
 * watchdog does not flag them as stalled while they are deliberately
 * paused.
 *
 * Return: none
 */
static void wlan_hdd_update_txq_timestamp(struct net_device *dev)
{
	struct netdev_queue *txq;
	int i;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		txq = netdev_get_tx_queue(dev, i);

		/*
		 * On UP system, kernel will trigger watchdog bite if spinlock
		 * recursion is detected. Unfortunately recursion is possible
		 * when it is called in dev_queue_xmit() context, where stack
		 * grabs the lock before calling driver's ndo_start_xmit
		 * callback.
		 */
		if (!hdd_netdev_queue_is_locked(txq)) {
			if (__netif_tx_trylock(txq)) {
				txq_trans_update(txq);
				__netif_tx_unlock(txq);
			}
		}
	}
}
892
893 /**
894 * wlan_hdd_update_unpause_time() - update unpause time
895 * @adapter: adapter handle
896 *
897 * Return: none
898 */
wlan_hdd_update_unpause_time(struct hdd_adapter * adapter)899 static void wlan_hdd_update_unpause_time(struct hdd_adapter *adapter)
900 {
901 qdf_time_t curr_time = qdf_system_ticks();
902
903 adapter->total_unpause_time += curr_time - adapter->last_time;
904 adapter->last_time = curr_time;
905 }
906
/**
 * wlan_hdd_update_pause_time() - update pause time
 * @adapter: adapter handle
 * @temp_map: snapshot of the pause reason bitmap
 *
 * Adds the elapsed pause interval to the adapter total and attributes
 * it to a pause reason taken from @temp_map.
 *
 * Return: none
 */
static void wlan_hdd_update_pause_time(struct hdd_adapter *adapter,
				       uint32_t temp_map)
{
	qdf_time_t curr_time = qdf_system_ticks();
	uint8_t i;
	qdf_time_t pause_time;

	pause_time = curr_time - adapter->last_time;
	adapter->total_pause_time += pause_time;
	adapter->last_time = curr_time;

	/*
	 * NOTE(review): only the lowest-numbered reason bit set in temp_map
	 * is charged with the pause time (the loop breaks on first match) —
	 * confirm this attribution is intentional when multiple reasons
	 * overlap.
	 */
	for (i = 0; i < WLAN_REASON_TYPE_MAX; i++) {
		if (temp_map & (1 << i)) {
			adapter->queue_oper_stats[i].total_pause_time +=
								 pause_time;
			break;
		}
	}

}
934
935 uint32_t
wlan_hdd_dump_queue_history_state(struct hdd_netif_queue_history * queue_history,char * buf,uint32_t size)936 wlan_hdd_dump_queue_history_state(struct hdd_netif_queue_history *queue_history,
937 char *buf, uint32_t size)
938 {
939 unsigned int i;
940 unsigned int index = 0;
941
942 for (i = 0; i < NUM_TX_QUEUES; i++) {
943 index += qdf_scnprintf(buf + index,
944 size - index,
945 "%u:0x%lx ",
946 i, queue_history->tx_q_state[i]);
947 }
948
949 return index;
950 }
951
952 /**
953 * wlan_hdd_update_queue_history_state() - Save a copy of dev TX queues state
954 * @dev: interface netdev
955 * @q_hist: adapter queue history
956 *
957 * Save netdev TX queues state into adapter queue history.
958 *
959 * Return: None
960 */
961 static void
wlan_hdd_update_queue_history_state(struct net_device * dev,struct hdd_netif_queue_history * q_hist)962 wlan_hdd_update_queue_history_state(struct net_device *dev,
963 struct hdd_netif_queue_history *q_hist)
964 {
965 unsigned int i = 0;
966 uint32_t num_tx_queues = 0;
967 struct netdev_queue *txq = NULL;
968
969 num_tx_queues = qdf_min(dev->num_tx_queues, (uint32_t)NUM_TX_QUEUES);
970
971 for (i = 0; i < num_tx_queues; i++) {
972 txq = netdev_get_tx_queue(dev, i);
973 q_hist->tx_q_state[i] = txq->state;
974 }
975 }
976
977 /**
978 * wlan_hdd_stop_non_priority_queue() - stop non priority queues
979 * @adapter: adapter handle
980 *
981 * Return: None
982 */
wlan_hdd_stop_non_priority_queue(struct hdd_adapter * adapter)983 static inline void wlan_hdd_stop_non_priority_queue(struct hdd_adapter *adapter)
984 {
985 uint8_t i;
986
987 for (i = 0; i < TX_QUEUES_PER_AC; i++) {
988 netif_stop_subqueue(adapter->dev,
989 TX_GET_QUEUE_IDX(HDD_LINUX_AC_VO, i));
990 netif_stop_subqueue(adapter->dev,
991 TX_GET_QUEUE_IDX(HDD_LINUX_AC_VI, i));
992 netif_stop_subqueue(adapter->dev,
993 TX_GET_QUEUE_IDX(HDD_LINUX_AC_BE, i));
994 netif_stop_subqueue(adapter->dev,
995 TX_GET_QUEUE_IDX(HDD_LINUX_AC_BK, i));
996 }
997 }
998
999 /**
1000 * wlan_hdd_wake_non_priority_queue() - wake non priority queues
1001 * @adapter: adapter handle
1002 *
1003 * Return: None
1004 */
wlan_hdd_wake_non_priority_queue(struct hdd_adapter * adapter)1005 static inline void wlan_hdd_wake_non_priority_queue(struct hdd_adapter *adapter)
1006 {
1007 uint8_t i;
1008
1009 for (i = 0; i < TX_QUEUES_PER_AC; i++) {
1010 netif_wake_subqueue(adapter->dev,
1011 TX_GET_QUEUE_IDX(HDD_LINUX_AC_VO, i));
1012 netif_wake_subqueue(adapter->dev,
1013 TX_GET_QUEUE_IDX(HDD_LINUX_AC_VI, i));
1014 netif_wake_subqueue(adapter->dev,
1015 TX_GET_QUEUE_IDX(HDD_LINUX_AC_BE, i));
1016 netif_wake_subqueue(adapter->dev,
1017 TX_GET_QUEUE_IDX(HDD_LINUX_AC_BK, i));
1018 }
1019 }
1020
1021 static inline
hdd_wake_queues_for_ac(struct net_device * dev,enum hdd_wmm_linuxac ac)1022 void hdd_wake_queues_for_ac(struct net_device *dev, enum hdd_wmm_linuxac ac)
1023 {
1024 uint8_t i;
1025
1026 for (i = 0; i < TX_QUEUES_PER_AC; i++)
1027 netif_wake_subqueue(dev, TX_GET_QUEUE_IDX(ac, i));
1028 }
1029
1030 static inline
hdd_stop_queues_for_ac(struct net_device * dev,enum hdd_wmm_linuxac ac)1031 void hdd_stop_queues_for_ac(struct net_device *dev, enum hdd_wmm_linuxac ac)
1032 {
1033 uint8_t i;
1034
1035 for (i = 0; i < TX_QUEUES_PER_AC; i++)
1036 netif_stop_subqueue(dev, TX_GET_QUEUE_IDX(ac, i));
1037 }
1038
/**
 * wlan_hdd_netif_queue_control() - Use for netif_queue related actions
 * @adapter: adapter handle
 * @action: action type
 * @reason: reason type
 *
 * This is the single function which is used for netif_queue related
 * actions like start/stop of network queues and on/off carrier option.
 *
 * Pause requests are reference-counted per @reason: each stop action sets
 * the reason's bit in adapter->pause_map (or, for per-subqueue reasons,
 * adapter->subqueue_pause_map), and queues are physically stopped/started
 * only on the transition between "no pause reason" and "some pause
 * reason". Every invocation is also recorded in the adapter's queue
 * operation history for debugging.
 *
 * Return: None
 */
void wlan_hdd_netif_queue_control(struct hdd_adapter *adapter,
	enum netif_action_type action, enum netif_reason_type reason)
{
	uint32_t temp_map;
	uint8_t index;
	struct hdd_netif_queue_history *txq_hist_ptr;

	if ((!adapter) || (WLAN_HDD_ADAPTER_MAGIC != adapter->magic) ||
	    (!adapter->dev)) {
		hdd_err("adapter is invalid");
		return;
	}

	/*
	 * NOTE(review): link adapters are skipped entirely — presumably
	 * queue control is only meaningful on the primary adapter that
	 * owns the netdev; confirm against MLO adapter layout.
	 */
	if (hdd_adapter_is_link_adapter(adapter))
		return;

	hdd_debug_rl("netif_control's vdev_id: %d, action: %d, reason: %d",
		     adapter->deflink->vdev_id, action, reason);

	switch (action) {

	case WLAN_NETIF_CARRIER_ON:
		netif_carrier_on(adapter->dev);
		break;

	case WLAN_NETIF_CARRIER_OFF:
		netif_carrier_off(adapter->dev);
		break;

	case WLAN_STOP_ALL_NETIF_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		/*
		 * Only the first pause reason actually stops the queues;
		 * subsequent reasons just accumulate in pause_map.
		 */
		if (!adapter->pause_map) {
			netif_tx_stop_all_queues(adapter->dev);
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_STOP_NON_PRIORITY_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		if (!adapter->pause_map) {
			wlan_hdd_stop_non_priority_queue(adapter);
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_PRIORITY_QUEUE_ON:
		spin_lock_bh(&adapter->pause_map_lock);
		/*
		 * WLAN_DATA_FLOW_CTRL_PRI is tracked in the per-subqueue
		 * map; every other reason lives in the main pause_map.
		 * temp_map captures the pre-clear value for pause-time
		 * accounting in wlan_hdd_update_pause_time().
		 */
		if (reason == WLAN_DATA_FLOW_CTRL_PRI) {
			temp_map = adapter->subqueue_pause_map;
			adapter->subqueue_pause_map &= ~(1 << reason);
		} else {
			temp_map = adapter->pause_map;
			adapter->pause_map &= ~(1 << reason);
		}
		if (!adapter->pause_map) {
			/* First subqueue of the high-priority AC */
			netif_wake_subqueue(adapter->dev,
				HDD_LINUX_AC_HI_PRIO * TX_QUEUES_PER_AC);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_PRIORITY_QUEUE_OFF:
		spin_lock_bh(&adapter->pause_map_lock);
		if (!adapter->pause_map) {
			/* First subqueue of the high-priority AC */
			netif_stop_subqueue(adapter->dev,
				HDD_LINUX_AC_HI_PRIO * TX_QUEUES_PER_AC);
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		if (reason == WLAN_DATA_FLOW_CTRL_PRI)
			adapter->subqueue_pause_map |= (1 << reason);
		else
			adapter->pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_BE_BK_QUEUE_OFF:
		spin_lock_bh(&adapter->pause_map_lock);
		if (!adapter->pause_map) {
			hdd_stop_queues_for_ac(adapter->dev, HDD_LINUX_AC_BK);
			hdd_stop_queues_for_ac(adapter->dev, HDD_LINUX_AC_BE);
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		/* AC-specific pauses are tracked in subqueue_pause_map */
		adapter->subqueue_pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_BE_BK_QUEUE_ON:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->subqueue_pause_map;
		adapter->subqueue_pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			hdd_wake_queues_for_ac(adapter->dev, HDD_LINUX_AC_BK);
			hdd_wake_queues_for_ac(adapter->dev, HDD_LINUX_AC_BE);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_VI_QUEUE_OFF:
		spin_lock_bh(&adapter->pause_map_lock);
		if (!adapter->pause_map) {
			hdd_stop_queues_for_ac(adapter->dev, HDD_LINUX_AC_VI);
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		adapter->subqueue_pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_VI_QUEUE_ON:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->subqueue_pause_map;
		adapter->subqueue_pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			hdd_wake_queues_for_ac(adapter->dev, HDD_LINUX_AC_VI);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_VO_QUEUE_OFF:
		spin_lock_bh(&adapter->pause_map_lock);
		if (!adapter->pause_map) {
			hdd_stop_queues_for_ac(adapter->dev, HDD_LINUX_AC_VO);
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		adapter->subqueue_pause_map |= (1 << reason);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_VO_QUEUE_ON:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->subqueue_pause_map;
		adapter->subqueue_pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			hdd_wake_queues_for_ac(adapter->dev, HDD_LINUX_AC_VO);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_START_ALL_NETIF_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		/* Queues restart only after the last pause reason clears */
		if (!adapter->pause_map) {
			netif_tx_start_all_queues(adapter->dev);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_WAKE_ALL_NETIF_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			netif_tx_wake_all_queues(adapter->dev);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_WAKE_NON_PRIORITY_QUEUE:
		spin_lock_bh(&adapter->pause_map_lock);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			wlan_hdd_wake_non_priority_queue(adapter);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER:
		spin_lock_bh(&adapter->pause_map_lock);
		if (!adapter->pause_map) {
			netif_tx_stop_all_queues(adapter->dev);
			wlan_hdd_update_txq_timestamp(adapter->dev);
			wlan_hdd_update_unpause_time(adapter);
		}
		adapter->pause_map |= (1 << reason);
		netif_carrier_off(adapter->dev);
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_START_ALL_NETIF_QUEUE_N_CARRIER:
		spin_lock_bh(&adapter->pause_map_lock);
		netif_carrier_on(adapter->dev);
		temp_map = adapter->pause_map;
		adapter->pause_map &= ~(1 << reason);
		if (!adapter->pause_map) {
			netif_tx_start_all_queues(adapter->dev);
			wlan_hdd_update_pause_time(adapter, temp_map);
		}
		spin_unlock_bh(&adapter->pause_map_lock);
		break;

	case WLAN_NETIF_ACTION_TYPE_NONE:
		break;

	default:
		hdd_err("unsupported action %d", action);
	}

	spin_lock_bh(&adapter->pause_map_lock);
	if (adapter->pause_map & (1 << WLAN_PEER_UNAUTHORISED))
		wlan_hdd_process_peer_unauthorised_pause(adapter);

	/*
	 * Reserve a unique history slot under the lock; the slot itself is
	 * filled in outside the lock since each caller owns its own index.
	 */
	index = adapter->history_index++;
	if (adapter->history_index == WLAN_HDD_MAX_HISTORY_ENTRY)
		adapter->history_index = 0;
	spin_unlock_bh(&adapter->pause_map_lock);

	wlan_hdd_update_queue_oper_stats(adapter, action, reason);

	/* Record this operation in the reserved history slot */
	adapter->queue_oper_history[index].time = qdf_system_ticks();
	adapter->queue_oper_history[index].netif_action = action;
	adapter->queue_oper_history[index].netif_reason = reason;
	/* Subqueue-flow-control reasons snapshot the subqueue map instead */
	if (reason >= WLAN_DATA_FLOW_CTRL_BE_BK)
		adapter->queue_oper_history[index].pause_map =
			adapter->subqueue_pause_map;
	else
		adapter->queue_oper_history[index].pause_map =
			adapter->pause_map;

	txq_hist_ptr = &adapter->queue_oper_history[index];

	wlan_hdd_update_queue_history_state(adapter->dev, txq_hist_ptr);
}
1291
hdd_print_netdev_txq_status(struct net_device * dev)1292 void hdd_print_netdev_txq_status(struct net_device *dev)
1293 {
1294 unsigned int i;
1295
1296 if (!dev)
1297 return;
1298
1299 for (i = 0; i < dev->num_tx_queues; i++) {
1300 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1301
1302 hdd_debug("netdev tx queue[%u] state:0x%lx",
1303 i, txq->state);
1304 }
1305 }
1306
#ifdef FEATURE_MONITOR_MODE_SUPPORT
/**
 * hdd_set_mon_rx_cb() - Set Monitor mode Rx callback
 * @dev: Pointer to net_device structure
 *
 * Registers the DP txrx ops on the monitor vdev, registers the self
 * peer with CDP and finally creates the SME monitor session.
 *
 * Return: 0 for success; non-zero for failure
 */
int hdd_set_mon_rx_cb(struct net_device *dev)
{
	struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
	struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	struct ol_txrx_desc_type sta_desc = {0};
	struct wlan_objmgr_vdev *vdev;
	QDF_STATUS status;

	WLAN_ADDR_COPY(sta_desc.peer_addr.bytes, adapter->mac_addr.bytes);

	vdev = hdd_objmgr_get_vdev_by_user(adapter->deflink, WLAN_DP_ID);
	if (!vdev) {
		hdd_err("failed to get vdev");
		return -EINVAL;
	}

	status = ucfg_dp_mon_register_txrx_ops(vdev);
	/* The vdev reference is only needed for the ops registration */
	hdd_objmgr_put_vdev_by_user(vdev, WLAN_DP_ID);
	if (QDF_STATUS_SUCCESS != status) {
		hdd_err("failed to register txrx ops. Status= %d [0x%08X]",
			status, status);
		goto exit;
	}

	/* peer is created wma_vdev_attach->wma_create_peer */
	status = cdp_peer_register(soc, OL_TXRX_PDEV_ID, &sta_desc);
	if (QDF_STATUS_SUCCESS != status) {
		hdd_err("cdp_peer_register() failed to register. Status= %d [0x%08X]",
			status, status);
		goto exit;
	}

	status = sme_create_mon_session(hdd_ctx->mac_handle,
					adapter->mac_addr.bytes,
					adapter->deflink->vdev_id);
	if (QDF_STATUS_SUCCESS != status)
		hdd_err("sme_create_mon_session() failed to register. Status= %d [0x%08X]",
			status, status);

exit:
	return qdf_status_to_os_return(status);
}
#endif
1362
/**
 * hdd_tx_queue_cb() - firmware-driven netif queue action handler
 * @hdd_handle: opaque handle to the HDD context
 * @vdev_id: vdev on which the queue action is requested
 * @action: netif action to apply
 * @reason: reason the action is being applied
 *
 * Resolves the adapter owning @vdev_id and forwards the requested
 * queue action to wlan_hdd_netif_queue_control().
 *
 * Return: None
 */
void hdd_tx_queue_cb(hdd_handle_t hdd_handle, uint32_t vdev_id,
		     enum netif_action_type action,
		     enum netif_reason_type reason)
{
	struct hdd_context *hdd_ctx = hdd_handle_to_context(hdd_handle);
	struct wlan_hdd_link_info *link_info;

	/*
	 * Validating the context is not required here: even if a driver
	 * unload/SSR is in progress in another context and a firmware
	 * sta-kickout event arrives, it is still good to disable the Tx
	 * queue to stop the influx of traffic.
	 */
	if (!hdd_ctx) {
		hdd_err("Invalid context passed");
		return;
	}

	link_info = hdd_get_link_info_by_vdev(hdd_ctx, vdev_id);
	if (!link_info) {
		hdd_err("vdev_id %d does not exist with host", vdev_id);
		return;
	}

	hdd_debug("Tx Queue action %d on vdev %d", action, vdev_id);
	wlan_hdd_netif_queue_control(link_info->adapter, action, reason);
}
1391
#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
/**
 * hdd_ini_tx_flow_control() - Initialize INIs concerned about tx flow control
 * @config: pointer to hdd config
 * @psoc: pointer to psoc obj
 *
 * Populates the low/high watermarks and maximum queue depths for the
 * default, low-bandwidth (lbw) and high-bandwidth (hbw) tx flow-control
 * paths from the corresponding INI items.
 *
 * Return: none
 */
static void hdd_ini_tx_flow_control(struct hdd_config *config,
				    struct wlan_objmgr_psoc *psoc)
{
	config->tx_flow_low_watermark =
		cfg_get(psoc, CFG_DP_LL_TX_FLOW_LWM);
	config->tx_flow_hi_watermark_offset =
		cfg_get(psoc, CFG_DP_LL_TX_FLOW_HWM_OFFSET);
	config->tx_flow_max_queue_depth =
		cfg_get(psoc, CFG_DP_LL_TX_FLOW_MAX_Q_DEPTH);
	config->tx_lbw_flow_low_watermark =
		cfg_get(psoc, CFG_DP_LL_TX_LBW_FLOW_LWM);
	config->tx_lbw_flow_hi_watermark_offset =
		cfg_get(psoc, CFG_DP_LL_TX_LBW_FLOW_HWM_OFFSET);
	config->tx_lbw_flow_max_queue_depth =
		cfg_get(psoc, CFG_DP_LL_TX_LBW_FLOW_MAX_Q_DEPTH);
	config->tx_hbw_flow_low_watermark =
		cfg_get(psoc, CFG_DP_LL_TX_HBW_FLOW_LWM);
	config->tx_hbw_flow_hi_watermark_offset =
		cfg_get(psoc, CFG_DP_LL_TX_HBW_FLOW_HWM_OFFSET);
	config->tx_hbw_flow_max_queue_depth =
		cfg_get(psoc, CFG_DP_LL_TX_HBW_FLOW_MAX_Q_DEPTH);
}
#else
/**
 * hdd_ini_tx_flow_control() - stub when legacy tx flow control is disabled
 * @config: pointer to hdd config (unused)
 * @psoc: pointer to psoc obj (unused)
 *
 * Return: none
 */
static inline void hdd_ini_tx_flow_control(struct hdd_config *config,
					   struct wlan_objmgr_psoc *psoc)
{
	/* no-op: legacy tx flow control INIs are not compiled in */
}
#endif
1428
#ifdef WLAN_FEATURE_MSCS
/**
 * hdd_ini_mscs_params() - Initialize INIs related to MSCS feature
 * @config: pointer to hdd config
 * @psoc: pointer to psoc obj
 *
 * Reads the MSCS voice packet-count threshold and voice interval from
 * the corresponding INI items into @config.
 *
 * Return: none
 */
static void hdd_ini_mscs_params(struct hdd_config *config,
				struct wlan_objmgr_psoc *psoc)
{
	config->mscs_pkt_threshold =
		cfg_get(psoc, CFG_VO_PKT_COUNT_THRESHOLD);
	config->mscs_voice_interval =
		cfg_get(psoc, CFG_MSCS_VOICE_INTERVAL);
}

#else
/**
 * hdd_ini_mscs_params() - stub when the MSCS feature is disabled
 * @config: pointer to hdd config (unused)
 * @psoc: pointer to psoc obj (unused)
 *
 * Return: none
 */
static inline void hdd_ini_mscs_params(struct hdd_config *config,
				       struct wlan_objmgr_psoc *psoc)
{
	/* no-op: MSCS INIs are not compiled in */
}
#endif
1452
hdd_dp_cfg_update(struct wlan_objmgr_psoc * psoc,struct hdd_context * hdd_ctx)1453 void hdd_dp_cfg_update(struct wlan_objmgr_psoc *psoc,
1454 struct hdd_context *hdd_ctx)
1455 {
1456 struct hdd_config *config;
1457
1458 config = hdd_ctx->config;
1459
1460 config->napi_cpu_affinity_mask =
1461 cfg_get(psoc, CFG_DP_NAPI_CE_CPU_MASK);
1462 config->cfg_wmi_credit_cnt = cfg_get(psoc, CFG_DP_HTC_WMI_CREDIT_CNT);
1463
1464 hdd_ini_tx_flow_control(config, psoc);
1465 hdd_ini_mscs_params(config, psoc);
1466 }
1467
1468 #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
1469 /**
1470 * hdd_set_tx_flow_info() - To set TX flow info
1471 * @adapter: pointer to adapter
1472 * @pre_adp_ctx: pointer to pre-adapter
1473 * @target_channel: target channel
1474 * @pre_adp_channel: pre-adapter channel
1475 * @dbgid: Debug IDs
1476 *
1477 * This routine is called to set TX flow info
1478 *
1479 * Return: None
1480 */
hdd_set_tx_flow_info(struct hdd_adapter * adapter,struct hdd_adapter ** pre_adp_ctx,uint8_t target_channel,uint8_t * pre_adp_channel,wlan_net_dev_ref_dbgid dbgid)1481 static void hdd_set_tx_flow_info(struct hdd_adapter *adapter,
1482 struct hdd_adapter **pre_adp_ctx,
1483 uint8_t target_channel,
1484 uint8_t *pre_adp_channel,
1485 wlan_net_dev_ref_dbgid dbgid)
1486 {
1487 struct hdd_context *hdd_ctx;
1488 uint8_t channel24;
1489 uint8_t channel5;
1490 struct hdd_adapter *adapter2_4 = NULL;
1491 struct hdd_adapter *adapter5 = NULL;
1492 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
1493
1494 hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
1495 if (!hdd_ctx)
1496 return;
1497
1498 if (!target_channel)
1499 return;
1500
1501 /*
1502 * This is first adapter detected as active
1503 * set as default for none concurrency case
1504 */
1505 if (!(*pre_adp_channel)) {
1506 /* If IPA UC data path is enabled,
1507 * target should reserve extra tx descriptors
1508 * for IPA data path.
1509 * Then host data path should allow less TX
1510 * packet pumping in case IPA
1511 * data path enabled
1512 */
1513 if (ucfg_ipa_uc_is_enabled() &&
1514 adapter->device_mode == QDF_SAP_MODE) {
1515 adapter->tx_flow_low_watermark =
1516 hdd_ctx->config->tx_flow_low_watermark +
1517 WLAN_TFC_IPAUC_TX_DESC_RESERVE;
1518 } else {
1519 adapter->tx_flow_low_watermark =
1520 hdd_ctx->config->tx_flow_low_watermark;
1521 }
1522 adapter->tx_flow_hi_watermark_offset =
1523 hdd_ctx->config->tx_flow_hi_watermark_offset;
1524 cdp_fc_ll_set_tx_pause_q_depth(soc,
1525 adapter->deflink->vdev_id,
1526 hdd_ctx->config->tx_flow_max_queue_depth);
1527 hdd_debug("MODE %d,CH %d,LWM %d,HWM %d,TXQDEP %d",
1528 adapter->device_mode,
1529 target_channel,
1530 adapter->tx_flow_low_watermark,
1531 adapter->tx_flow_low_watermark +
1532 adapter->tx_flow_hi_watermark_offset,
1533 hdd_ctx->config->tx_flow_max_queue_depth);
1534 *pre_adp_channel = target_channel;
1535 *pre_adp_ctx = adapter;
1536 } else {
1537 /*
1538 * SCC, disable TX flow control for both
1539 * SCC each adapter cannot reserve dedicated
1540 * channel resource, as a result, if any adapter
1541 * blocked OS Q by flow control,
1542 * blocked adapter will lost chance to recover
1543 */
1544 if (*pre_adp_channel == target_channel) {
1545 /* Current adapter */
1546 adapter->tx_flow_low_watermark = 0;
1547 adapter->tx_flow_hi_watermark_offset = 0;
1548 cdp_fc_ll_set_tx_pause_q_depth(soc,
1549 adapter->deflink->vdev_id,
1550 hdd_ctx->config->tx_hbw_flow_max_queue_depth);
1551 hdd_debug("SCC: MODE %s(%d), CH %d, LWM %d, HWM %d, TXQDEP %d",
1552 qdf_opmode_str(adapter->device_mode),
1553 adapter->device_mode,
1554 target_channel,
1555 adapter->tx_flow_low_watermark,
1556 adapter->tx_flow_low_watermark +
1557 adapter->tx_flow_hi_watermark_offset,
1558 hdd_ctx->config->tx_hbw_flow_max_queue_depth);
1559
1560 if (!(*pre_adp_ctx)) {
1561 hdd_err("SCC: Previous adapter context NULL");
1562 hdd_adapter_dev_put_debug(adapter, dbgid);
1563 return;
1564 }
1565
1566 /* Previous adapter */
1567 (*pre_adp_ctx)->tx_flow_low_watermark = 0;
1568 (*pre_adp_ctx)->tx_flow_hi_watermark_offset = 0;
1569 cdp_fc_ll_set_tx_pause_q_depth(soc,
1570 (*pre_adp_ctx)->deflink->vdev_id,
1571 hdd_ctx->config->tx_hbw_flow_max_queue_depth);
1572 hdd_debug("SCC: MODE %s(%d), CH %d, LWM %d, HWM %d, TXQDEP %d",
1573 qdf_opmode_str((*pre_adp_ctx)->device_mode),
1574 (*pre_adp_ctx)->device_mode,
1575 target_channel,
1576 (*pre_adp_ctx)->tx_flow_low_watermark,
1577 (*pre_adp_ctx)->tx_flow_low_watermark +
1578 (*pre_adp_ctx)->tx_flow_hi_watermark_offset,
1579 hdd_ctx->config->tx_hbw_flow_max_queue_depth);
1580 } else {
1581 /*
1582 * MCC, each adapter will have dedicated
1583 * resource
1584 */
1585 /* current channel is 2.4 */
1586 if (target_channel <=
1587 WLAN_HDD_TX_FLOW_CONTROL_MAX_24BAND_CH) {
1588 channel24 = target_channel;
1589 channel5 = *pre_adp_channel;
1590 adapter2_4 = adapter;
1591 adapter5 = *pre_adp_ctx;
1592 } else {
1593 /* Current channel is 5 */
1594 channel24 = *pre_adp_channel;
1595 channel5 = target_channel;
1596 adapter2_4 = *pre_adp_ctx;
1597 adapter5 = adapter;
1598 }
1599
1600 if (!adapter5) {
1601 hdd_err("MCC: 5GHz adapter context NULL");
1602 hdd_adapter_dev_put_debug(adapter, dbgid);
1603 return;
1604 }
1605 adapter5->tx_flow_low_watermark =
1606 hdd_ctx->config->tx_hbw_flow_low_watermark;
1607 adapter5->tx_flow_hi_watermark_offset =
1608 hdd_ctx->config->tx_hbw_flow_hi_watermark_offset;
1609 cdp_fc_ll_set_tx_pause_q_depth(soc,
1610 adapter5->deflink->vdev_id,
1611 hdd_ctx->config->tx_hbw_flow_max_queue_depth);
1612 hdd_debug("MCC: MODE %s(%d), CH %d, LWM %d, HWM %d, TXQDEP %d",
1613 qdf_opmode_str(adapter5->device_mode),
1614 adapter5->device_mode,
1615 channel5,
1616 adapter5->tx_flow_low_watermark,
1617 adapter5->tx_flow_low_watermark +
1618 adapter5->tx_flow_hi_watermark_offset,
1619 hdd_ctx->config->tx_hbw_flow_max_queue_depth);
1620
1621 if (!adapter2_4) {
1622 hdd_err("MCC: 2.4GHz adapter context NULL");
1623 hdd_adapter_dev_put_debug(adapter, dbgid);
1624 return;
1625 }
1626 adapter2_4->tx_flow_low_watermark =
1627 hdd_ctx->config->tx_lbw_flow_low_watermark;
1628 adapter2_4->tx_flow_hi_watermark_offset =
1629 hdd_ctx->config->tx_lbw_flow_hi_watermark_offset;
1630 cdp_fc_ll_set_tx_pause_q_depth(soc,
1631 adapter2_4->deflink->vdev_id,
1632 hdd_ctx->config->tx_lbw_flow_max_queue_depth);
1633 hdd_debug("MCC: MODE %s(%d), CH %d, LWM %d, HWM %d, TXQDEP %d",
1634 qdf_opmode_str(adapter2_4->device_mode),
1635 adapter2_4->device_mode,
1636 channel24,
1637 adapter2_4->tx_flow_low_watermark,
1638 adapter2_4->tx_flow_low_watermark +
1639 adapter2_4->tx_flow_hi_watermark_offset,
1640 hdd_ctx->config->tx_lbw_flow_max_queue_depth);
1641 }
1642 }
1643 }
1644
/**
 * wlan_hdd_set_tx_flow_info() - configure tx flow control on all adapters
 *
 * Walks every registered adapter, determines its operating channel when
 * it is an associated STA/P2P-client or a started SAP/P2P-GO, and lets
 * hdd_set_tx_flow_info() apply per-adapter flow-control watermarks based
 * on concurrency with the previously processed adapter.
 *
 * Return: None
 */
void wlan_hdd_set_tx_flow_info(void)
{
	struct hdd_adapter *adapter, *next_adapter = NULL;
	struct hdd_station_ctx *sta_ctx;
	struct hdd_ap_ctx *ap_ctx;
	struct hdd_hostapd_state *hostapd_state;
	uint8_t sta_chan = 0, ap_chan = 0;
	uint32_t chan_freq;
	struct hdd_context *hdd_ctx;
	uint8_t target_channel = 0;
	/* channel/context of the previous active adapter in this walk */
	uint8_t pre_adp_channel = 0;
	struct hdd_adapter *pre_adp_ctx = NULL;
	wlan_net_dev_ref_dbgid dbgid = NET_DEV_HOLD_IPA_SET_TX_FLOW_INFO;

	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
	if (!hdd_ctx)
		return;

	/* Each iteration holds a dev reference; released at loop end */
	hdd_for_each_adapter_dev_held_safe(hdd_ctx, adapter, next_adapter,
					   dbgid) {
		switch (adapter->device_mode) {
		case QDF_STA_MODE:
		case QDF_P2P_CLIENT_MODE:
			/* Active only when the vdev is associated */
			sta_ctx =
				WLAN_HDD_GET_STATION_CTX_PTR(adapter->deflink);
			if (hdd_cm_is_vdev_associated(adapter->deflink)) {
				chan_freq = sta_ctx->conn_info.chan_freq;
				sta_chan = wlan_reg_freq_to_chan(hdd_ctx->pdev,
								 chan_freq);
				target_channel = sta_chan;
			}
			break;
		case QDF_SAP_MODE:
		case QDF_P2P_GO_MODE:
			/* Active only when the BSS has been started */
			ap_ctx = WLAN_HDD_GET_AP_CTX_PTR(adapter->deflink);
			hostapd_state =
				WLAN_HDD_GET_HOSTAP_STATE_PTR(adapter->deflink);
			if (hostapd_state->bss_state == BSS_START &&
			    hostapd_state->qdf_status == QDF_STATUS_SUCCESS) {
				chan_freq = ap_ctx->operating_chan_freq;
				ap_chan = wlan_reg_freq_to_chan(hdd_ctx->pdev,
								chan_freq);
				target_channel = ap_chan;
			}
			break;
		default:
			/* other modes do not participate in flow control */
			break;
		}

		hdd_set_tx_flow_info(adapter,
				     &pre_adp_ctx,
				     target_channel,
				     &pre_adp_channel,
				     dbgid);
		/* reset so an inactive adapter is not treated as active */
		target_channel = 0;

		hdd_adapter_dev_put_debug(adapter, dbgid);
	}
}
1704 #endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
1705