/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: wlan_hdd_ipa.c
 *
 * WLAN HDD and IPA interface implementation
 */

/* Include Files */
#include <wlan_hdd_includes.h>
#include <wlan_hdd_ipa.h>
#include "wlan_policy_mgr_ucfg.h"
#include "wlan_ipa_ucfg_api.h"
#include <wlan_hdd_softap_tx_rx.h>
#include <linux/inetdevice.h>
#include <qdf_trace.h>
/* Test against msm kernel version */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) && \
	IS_ENABLED(CONFIG_SCHED_WALT)
#include <linux/sched/walt.h>
#endif
#include "wlan_hdd_object_manager.h"
#include "wlan_dp_ucfg_api.h"

#ifdef IPA_OFFLOAD

/**
 * struct hdd_ipa_connection_info - connection info for IPA component
 * @vdev_id: vdev id
 * @ch_freq: channel frequency
 * @ch_width: channel width
 * @wlan_80211_mode: enum qca_wlan_802_11_mode
 */
struct hdd_ipa_connection_info {
	uint8_t vdev_id;
	qdf_freq_t ch_freq;
	enum phy_ch_width ch_width;
	enum qca_wlan_802_11_mode wlan_80211_mode;
};

#if (defined(QCA_CONFIG_SMP) && defined(PF_WAKE_UP_IDLE)) ||\
	IS_ENABLED(CONFIG_SCHED_WALT)
/**
 * hdd_ipa_get_wake_up_idle() - Get PF_WAKE_UP_IDLE flag in the task structure
 *
 * Get PF_WAKE_UP_IDLE flag in the task structure
 *
 * Return: 1 if PF_WAKE_UP_IDLE flag is set, 0 otherwise
 */
static uint32_t hdd_ipa_get_wake_up_idle(void)
{
	return sched_get_wake_up_idle(current);
}

/**
 * hdd_ipa_set_wake_up_idle() - Set PF_WAKE_UP_IDLE flag in the task structure
 * @wake_up_idle: Value to set PF_WAKE_UP_IDLE flag
 *
 * Set PF_WAKE_UP_IDLE flag in the task structure.
 * This task and any task woken by it will be woken to an idle CPU.
 *
 * Return: None
 */
static void hdd_ipa_set_wake_up_idle(bool wake_up_idle)
{
	sched_set_wake_up_idle(current, wake_up_idle);
}
#else
static uint32_t hdd_ipa_get_wake_up_idle(void)
{
	return 0;
}

static void hdd_ipa_set_wake_up_idle(bool wake_up_idle)
{
}
#endif

#ifdef QCA_CONFIG_SMP
/**
 * hdd_ipa_send_to_nw_stack() - Deliver an skb to the network stack, using
 * NAPI polling if IPA supports it for LAN RX
 * @skb: data buffer sent to network stack
 *
 * If IPA LAN RX supports the NAPI polling mechanism, use
 * netif_receive_skb instead of netif_rx_ni to forward the skb
 * to the network stack.
 *
 * Return: Return value from netif_rx_ni/netif_receive_skb
 */
static int hdd_ipa_send_to_nw_stack(qdf_nbuf_t skb)
{
	int result;

	if (qdf_ipa_get_lan_rx_napi())
		result = netif_receive_skb(skb);
	else
		result = netif_rx_ni(skb);
	return result;
}
#else
static int hdd_ipa_send_to_nw_stack(qdf_nbuf_t skb)
{
	int result;

	result = netif_rx_ni(skb);
	return result;
}
#endif

#ifdef QCA_CONFIG_SMP

/**
 * hdd_ipa_aggregated_rx_ind() - Submit aggregated packets to the stack
 * @skb: skb to be submitted to the stack
 *
 * For CONFIG_SMP systems, simply call netif_rx_ni.
 * For non-CONFIG_SMP systems, call netif_rx until
 * IPA_WLAN_RX_SOFTIRQ_THRESH is reached, then call netif_rx_ni.
 * In this manner, UDP/TCP packets are sent in an aggregated way to the stack.
 * For IP/ICMP packets, simply call netif_rx_ni.
 *
 * If IPA supports NAPI polling, netif_receive_skb is used
 * instead of netif_rx_ni.
 *
 * Return: return value from the netif_rx_ni/netif_rx api.
 */
static int hdd_ipa_aggregated_rx_ind(qdf_nbuf_t skb)
{
	int ret;

	ret = hdd_ipa_send_to_nw_stack(skb);
	return ret;
}
#else
static int hdd_ipa_aggregated_rx_ind(qdf_nbuf_t skb)
{
	struct iphdr *ip_h;
	static atomic_t softirq_mitigation_cntr =
		ATOMIC_INIT(IPA_WLAN_RX_SOFTIRQ_THRESH);
	int result;

	ip_h = (struct iphdr *)(skb->data);
	if ((skb->protocol == htons(ETH_P_IP)) &&
	    (ip_h->protocol == IPPROTO_ICMP)) {
		result = hdd_ipa_send_to_nw_stack(skb);
	} else {
		/* Call netif_rx_ni for every IPA_WLAN_RX_SOFTIRQ_THRESH packets
		 * to avoid excessive softirqs.
		 */
		if (atomic_dec_and_test(&softirq_mitigation_cntr)) {
			result = hdd_ipa_send_to_nw_stack(skb);
			atomic_set(&softirq_mitigation_cntr,
				   IPA_WLAN_RX_SOFTIRQ_THRESH);
		} else {
			result = netif_rx(skb);
		}
	}

	return result;
}
#endif

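/**
 * hdd_ipa_send_nbuf_to_network() - Deliver an exception packet received
 * from IPA to the network stack
 * @nbuf: network buffer to be delivered
 * @dev: network device on which the buffer was received
 *
 * Return: None
 */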
void hdd_ipa_send_nbuf_to_network(qdf_nbuf_t nbuf, qdf_netdev_t dev)
{
	struct hdd_adapter *adapter = (struct hdd_adapter *) netdev_priv(dev);
	struct wlan_objmgr_vdev *vdev;
	int result;
	bool delivered = false;
	uint32_t enabled, len = 0;
	struct hdd_tx_rx_stats *stats;
	struct hdd_station_ctx *sta_ctx;
	bool is_eapol;
	u8 *ta_addr = NULL;

	if (hdd_validate_adapter(adapter)) {
		kfree_skb(nbuf);
		return;
	}

	if (cds_is_driver_unloading()) {
		kfree_skb(nbuf);
		return;
	}

	stats = &adapter->deflink->hdd_stats.tx_rx_stats;
	hdd_ipa_update_rx_mcbc_stats(adapter, nbuf);

	if ((adapter->device_mode == QDF_SAP_MODE) &&
	    (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf) == true)) {
		/* Send DHCP Indication to FW */
		vdev = hdd_objmgr_get_vdev_by_user(adapter->deflink,
						   WLAN_DP_ID);
		if (vdev) {
			ucfg_dp_softap_inspect_dhcp_packet(vdev, nbuf, QDF_RX);
			hdd_objmgr_put_vdev_by_user(vdev, WLAN_DP_ID);
		}
	}

	is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf);

	qdf_dp_trace_set_track(nbuf, QDF_RX);

	ucfg_dp_event_eapol_log(nbuf, QDF_RX);
	qdf_dp_trace_log_pkt(adapter->deflink->vdev_id,
			     nbuf, QDF_RX, QDF_TRACE_DEFAULT_PDEV_ID,
			     adapter->device_mode);
	DPTRACE(qdf_dp_trace(nbuf,
			     QDF_DP_TRACE_RX_HDD_PACKET_PTR_RECORD,
			     QDF_TRACE_DEFAULT_PDEV_ID,
			     qdf_nbuf_data_addr(nbuf),
			     sizeof(qdf_nbuf_data(nbuf)), QDF_RX));
	DPTRACE(qdf_dp_trace_data_pkt(nbuf, QDF_TRACE_DEFAULT_PDEV_ID,
				      QDF_DP_TRACE_RX_PACKET_RECORD, 0,
				      QDF_RX));

	/*
	 * Set PF_WAKE_UP_IDLE flag in the task structure.
	 * This task and any task woken by it will be woken to an idle CPU.
	 */
	enabled = hdd_ipa_get_wake_up_idle();
	if (!enabled)
		hdd_ipa_set_wake_up_idle(true);

	nbuf->dev = adapter->dev;
	nbuf->protocol = eth_type_trans(nbuf, nbuf->dev);
	nbuf->ip_summed = CHECKSUM_NONE;
	len = nbuf->len;

	/*
	 * Update STA RX exception packet stats here.
	 * For SAP, the stats are updated as part of IPA HW stats.
	 */

	if (is_eapol && SEND_EAPOL_OVER_NL) {
		if (adapter->device_mode == QDF_SAP_MODE) {
			ta_addr = adapter->mac_addr.bytes;
		} else if (adapter->device_mode == QDF_STA_MODE) {
			sta_ctx =
				WLAN_HDD_GET_STATION_CTX_PTR(adapter->deflink);
			ta_addr = (u8 *)&sta_ctx->conn_info.peer_macaddr;
		}

		if (ta_addr) {
			if (wlan_hdd_cfg80211_rx_control_port(adapter->dev,
							      ta_addr, nbuf,
							      false))
				result = NET_RX_SUCCESS;
			else
				result = NET_RX_DROP;
		} else {
			result = NET_RX_DROP;
		}

		dev_kfree_skb(nbuf);
	} else {
		result = hdd_ipa_aggregated_rx_ind(nbuf);
	}

	if (result == NET_RX_SUCCESS)
		delivered = true;
	/*
	 * adapter->vdev is dereferenced directly because this is the
	 * per-packet path; using hdd_get_vdev_by_user() would be very
	 * costly as it involves lock access.
	 * The expectation here is that the vdev is present during TX/RX
	 * processing and that DP internally maintains a vdev ref count.
	 */
	ucfg_dp_inc_rx_pkt_stats(adapter->deflink->vdev,
				 len, delivered);
	/*
	 * Restore PF_WAKE_UP_IDLE flag in the task structure
	 */
	if (!enabled)
		hdd_ipa_set_wake_up_idle(false);
}

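/**
 * hdd_ipa_set_mcc_mode() - Pass the current MCC mode to the IPA component
 * @mcc_mode: MCC mode to be set
 *
 * Return: None
 */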
void hdd_ipa_set_mcc_mode(bool mcc_mode)
{
	struct hdd_context *hdd_ctx;

	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
	if (!hdd_ctx)
		return;

	ucfg_ipa_set_mcc_mode(hdd_ctx->pdev, mcc_mode);
}

#ifdef IPA_WDI3_TX_TWO_PIPES
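/**
 * hdd_ipa_fill_sta_connection_info() - Fill IPA connection info for a
 * STA mode link
 * @link: HDD link info
 * @conn: IPA connection info to be filled
 *
 * Return: None
 */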
static void
hdd_ipa_fill_sta_connection_info(struct wlan_hdd_link_info *link,
				 struct hdd_ipa_connection_info *conn)
{
	struct hdd_station_ctx *ctx = WLAN_HDD_GET_STATION_CTX_PTR(link);

	conn->ch_freq = ctx->conn_info.chan_freq;
	conn->ch_width = ctx->conn_info.ch_width;
	conn->wlan_80211_mode = hdd_convert_cfgdot11mode_to_80211mode(
			ctx->conn_info.dot11mode);
}

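/**
 * hdd_ipa_fill_sap_connection_info() - Fill IPA connection info for a
 * SAP mode link
 * @link: HDD link info
 * @conn: IPA connection info to be filled
 *
 * Return: None
 */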
static void
hdd_ipa_fill_sap_connection_info(struct wlan_hdd_link_info *link,
				 struct hdd_ipa_connection_info *conn)
{
	struct hdd_ap_ctx *ctx = WLAN_HDD_GET_AP_CTX_PTR(link);

	conn->ch_freq = ctx->operating_chan_freq;
	conn->ch_width = ctx->sap_config.ch_params.ch_width;
	conn->wlan_80211_mode = hdd_convert_phymode_to_80211mode(
			ctx->sap_config.SapHw_mode);
}

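/**
 * hdd_ipa_fill_connection_info() - Fill IPA connection info based on the
 * device mode of the link
 * @link: HDD link info
 * @conn: IPA connection info to be filled
 *
 * Return: None
 */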
static void hdd_ipa_fill_connection_info(struct wlan_hdd_link_info *link,
					 struct hdd_ipa_connection_info *conn)
{
	struct hdd_adapter *adapter = link->adapter;

	conn->vdev_id = link->vdev_id;

	if (adapter->device_mode == QDF_STA_MODE)
		hdd_ipa_fill_sta_connection_info(link, conn);
	else if (adapter->device_mode == QDF_SAP_MODE)
		hdd_ipa_fill_sap_connection_info(link, conn);
}

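/**
 * hdd_ipa_get_tx_pipe_multi_conn() - Select the IPA TX pipe for a new
 * connection when other connections already exist
 * @hdd_ctx: HDD context
 * @conn: connection info of the new connection
 * @tx_pipe: output parameter, true for the alternate pipe, false for the
 *  primary pipe
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL otherwise
 */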
static QDF_STATUS
hdd_ipa_get_tx_pipe_multi_conn(struct hdd_context *hdd_ctx,
			       struct hdd_ipa_connection_info *conn,
			       bool *tx_pipe)
{
	uint32_t new_freq = conn->ch_freq;
	QDF_STATUS status;
	uint8_t vdev_id;
	bool pipe;

	if (ucfg_policy_mgr_get_vdev_same_freq_new_conn(hdd_ctx->psoc,
							new_freq,
							&vdev_id)) {
		/* Inherit the pipe selection of the connection that has
		 * the same frequency.
		 */
		return ucfg_ipa_get_alt_pipe(hdd_ctx->pdev, vdev_id, tx_pipe);
	} else {
		if (ucfg_policy_mgr_get_vdev_diff_freq_new_conn(hdd_ctx->psoc,
								new_freq,
								&vdev_id)) {
			status = ucfg_ipa_get_alt_pipe(hdd_ctx->pdev, vdev_id,
						       &pipe);
			if (QDF_IS_STATUS_ERROR(status))
				return QDF_STATUS_E_INVAL;

			/* Invert the pipe selection of the connection that
			 * has a different channel frequency.
			 */
			*tx_pipe = !pipe;
			return QDF_STATUS_SUCCESS;
		} else {
			return QDF_STATUS_E_INVAL;
		}
	}
}

QDF_STATUS hdd_ipa_get_tx_pipe(struct hdd_context *hdd_ctx,
			       struct wlan_hdd_link_info *link,
			       bool *tx_pipe)
{
	struct hdd_ipa_connection_info conn;
	uint32_t count;

	if (qdf_unlikely(!hdd_ctx || !link || !tx_pipe)) {
		hdd_debug("Invalid parameters");
		return QDF_STATUS_E_INVAL;
	}

	/* Fill the connection info before it is consulted below */
	hdd_ipa_fill_connection_info(link, &conn);

	/* If SBS not capable, use legacy DBS selection */
	if (!ucfg_policy_mgr_is_hw_sbs_capable(hdd_ctx->psoc)) {
		hdd_debug("firmware is not sbs capable");
		*tx_pipe = WLAN_REG_IS_24GHZ_CH_FREQ(conn.ch_freq);
		return QDF_STATUS_SUCCESS;
	}

	/* Always select the primary pipe for a connection that is EHT160 or
	 * EHT320 due to higher throughput requirements.
	 */
	if (conn.wlan_80211_mode == QCA_WLAN_802_11_MODE_11BE &&
	    (conn.ch_width == CH_WIDTH_160MHZ ||
	     conn.ch_width == CH_WIDTH_320MHZ)) {
		*tx_pipe = false;
		return QDF_STATUS_SUCCESS;
	}

	count = ucfg_policy_mgr_get_connection_count(hdd_ctx->psoc);
	if (!count) {
		/* For the first connection that is below EHT160, select the
		 * alternate pipe so as to reserve the primary pipe for
		 * potential connections that are above EHT160.
		 */
		*tx_pipe = true;
		return QDF_STATUS_SUCCESS;
	}

	return hdd_ipa_get_tx_pipe_multi_conn(hdd_ctx, &conn, tx_pipe);
}
#else /* !IPA_WDI3_TX_TWO_PIPES */
QDF_STATUS hdd_ipa_get_tx_pipe(struct hdd_context *hdd_ctx,
			       struct wlan_hdd_link_info *link,
			       bool *tx_pipe)
{
	if (qdf_unlikely(!tx_pipe))
		return QDF_STATUS_E_INVAL;

	/* For IPA_WDI3_TX_TWO_PIPES=n, only one tx pipe is available */
	*tx_pipe = false;

	return QDF_STATUS_SUCCESS;
}
#endif /* IPA_WDI3_TX_TWO_PIPES */

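/**
 * hdd_ipa_set_perf_level_bw() - Set the IPA performance level based on
 * the connection bandwidth
 * @bw: connection bandwidth
 *
 * Return: None
 */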
void hdd_ipa_set_perf_level_bw(enum hw_mode_bandwidth bw)
{
	struct hdd_context *hdd_ctx;
	enum wlan_ipa_bw_level lvl;

	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
	if (!hdd_ctx)
		return;

	if (bw == HW_MODE_320_MHZ)
		lvl = WLAN_IPA_BW_LEVEL_HIGH;
	else if (bw == HW_MODE_160_MHZ)
		lvl = WLAN_IPA_BW_LEVEL_MEDIUM;
	else
		lvl = WLAN_IPA_BW_LEVEL_LOW;

	hdd_debug("Vote IPA perf level to %d", lvl);
	ucfg_ipa_set_perf_level_bw(hdd_ctx->pdev, lvl);
}

#endif /* IPA_OFFLOAD */