/*
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
/**
 * DOC: wlan_dp_txrx.c
 * DP TX/RX path implementation
 */

#include <wlan_dp_priv.h>
#include <wlan_dp_main.h>
#include <wlan_dp_txrx.h>
#include <qdf_types.h>
#include <cdp_txrx_cmn.h>
#include <cdp_txrx_peer_ops.h>
#include <cdp_txrx_misc.h>
#include <cdp_txrx_flow_ctrl_v2.h>
#include "wlan_dp_rx_thread.h"
#if defined(WLAN_SUPPORT_RX_FISA)
#include "wlan_dp_fisa_rx.h"
#endif
#include "nan_public_structs.h"
#include "wlan_nan_api_i.h"
#include <wlan_cm_api.h>
#include <enet.h>
#include <cds_utils.h>
#include <wlan_dp_bus_bandwidth.h>
#include "wlan_tdls_api.h"
#include <qdf_trace.h>
#include <qdf_net_stats.h>

uint32_t wlan_dp_intf_get_pkt_type_bitmap_value(void *intf_ctx)
{
	struct wlan_dp_intf *dp_intf = (struct wlan_dp_intf *)intf_ctx;

	if (!dp_intf) {
		dp_err_rl("DP Context is NULL");
		return 0;
	}

	return dp_intf->pkt_type_bitmap;
}

#if defined(WLAN_SUPPORT_RX_FISA)
void dp_rx_skip_fisa(struct wlan_dp_psoc_context *dp_ctx, uint32_t value)
{
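	/* 'value' enables FISA, so the skip flag stores its inverse */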
	qdf_atomic_set(&dp_ctx->skip_fisa_param.skip_fisa, !value);
}
#endif

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
void dp_get_tx_resource(struct wlan_dp_link *dp_link,
			struct qdf_mac_addr *mac_addr)
{
	struct wlan_dp_intf *dp_intf = dp_link->dp_intf;
	struct wlan_dp_psoc_callbacks *dp_ops = &dp_intf->dp_ctx->dp_ops;

	dp_ops->dp_get_tx_resource(dp_link->link_id,
				   mac_addr);
}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

#ifdef FEATURE_WLAN_DIAG_SUPPORT
/**
 * dp_event_eapol_log() - send event to wlan diag
 * @nbuf: Network buffer ptr
 * @dir: direction
 *
 * Return: None
 */
void dp_event_eapol_log(qdf_nbuf_t nbuf, enum qdf_proto_dir dir)
{
	int16_t eapol_key_info;

	WLAN_HOST_DIAG_EVENT_DEF(wlan_diag_event, struct host_event_wlan_eapol);

	if (dir == QDF_TX && QDF_NBUF_CB_PACKET_TYPE_EAPOL !=
	    QDF_NBUF_CB_GET_PACKET_TYPE(nbuf))
		return;
	else if (!qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
		return;

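	/*
	 * Read the EAPOL-Key "Key Information" field directly from the
	 * frame at its fixed offset.
	 */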
	eapol_key_info = (uint16_t)(*(uint16_t *)
				(nbuf->data + EAPOL_KEY_INFO_OFFSET));

	wlan_diag_event.event_sub_type =
		(dir == QDF_TX ?
		 WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED :
		 WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
	wlan_diag_event.eapol_packet_type = (uint8_t)(*(uint8_t *)
				(nbuf->data + EAPOL_PACKET_TYPE_OFFSET));
	wlan_diag_event.eapol_key_info = eapol_key_info;
	wlan_diag_event.eapol_rate = 0;
	qdf_mem_copy(wlan_diag_event.dest_addr,
		     (nbuf->data + QDF_NBUF_DEST_MAC_OFFSET),
		     sizeof(wlan_diag_event.dest_addr));
	qdf_mem_copy(wlan_diag_event.src_addr,
		     (nbuf->data + QDF_NBUF_SRC_MAC_OFFSET),
		     sizeof(wlan_diag_event.src_addr));

	WLAN_HOST_DIAG_EVENT_REPORT(&wlan_diag_event, EVENT_WLAN_EAPOL);
}
#endif /* FEATURE_WLAN_DIAG_SUPPORT */

static int dp_intf_is_tx_allowed(qdf_nbuf_t nbuf,
				 uint8_t intf_id, void *soc,
				 uint8_t *peer_mac)
{
	enum ol_txrx_peer_state peer_state;

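	/*
	 * Tx is allowed only in the authorized peer state; in the
	 * connected (not yet authorized) state, only key-exchange
	 * frames (EAPOL / WAPI WAI) may be sent.
	 */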
	peer_state = cdp_peer_state_get(soc, intf_id, peer_mac, false);
	if (qdf_likely(OL_TXRX_PEER_STATE_AUTH == peer_state))
		return true;
	if (OL_TXRX_PEER_STATE_CONN == peer_state &&
	    (qdf_ntohs(qdf_nbuf_get_protocol(nbuf)) == ETHERTYPE_PAE ||
	     IS_DP_ETHERTYPE_WAI(nbuf)))
		return true;

	dp_info("Invalid peer state for Tx: %d", peer_state);
	return false;
}

/**
 * dp_tx_rx_is_dns_domain_name_match() - check whether the DNS domain name
 * in the received nbuf matches the DNS domain name being tracked
 *
 * @nbuf: Network buffer pointer
 * @dp_intf: DP interface pointer
 *
 * Return: true if it matches, false otherwise
 */
static bool dp_tx_rx_is_dns_domain_name_match(qdf_nbuf_t nbuf,
					      struct wlan_dp_intf *dp_intf)
{
	uint8_t *domain_name;

	if (dp_intf->track_dns_domain_len == 0)
		return false;

	/*
	 * Out-of-bounds check: make sure the strncmp below does not
	 * read past skb->len.
	 */
	if ((dp_intf->track_dns_domain_len +
	    QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET) > qdf_nbuf_len(nbuf))
		return false;

	domain_name = qdf_nbuf_get_dns_domain_name(nbuf,
						dp_intf->track_dns_domain_len);
	if (qdf_str_ncmp(domain_name, dp_intf->dns_payload,
			 dp_intf->track_dns_domain_len) == 0)
		return true;
	else
		return false;
}

/**
 * dp_clear_tx_rx_connectivity_stats() - clear connectivity stats
 * @dp_intf: pointer to DP interface
 *
 * Return: None
 */
static void dp_clear_tx_rx_connectivity_stats(struct wlan_dp_intf *dp_intf)
{
	dp_debug("Clear txrx connectivity stats");
	qdf_mem_zero(&dp_intf->dp_stats.arp_stats,
		     sizeof(dp_intf->dp_stats.arp_stats));
	qdf_mem_zero(&dp_intf->dp_stats.dns_stats,
		     sizeof(dp_intf->dp_stats.dns_stats));
	qdf_mem_zero(&dp_intf->dp_stats.tcp_stats,
		     sizeof(dp_intf->dp_stats.tcp_stats));
	qdf_mem_zero(&dp_intf->dp_stats.icmpv4_stats,
		     sizeof(dp_intf->dp_stats.icmpv4_stats));
	dp_intf->pkt_type_bitmap = 0;
	dp_intf->track_arp_ip = 0;
	qdf_mem_zero(dp_intf->dns_payload, dp_intf->track_dns_domain_len);
	dp_intf->track_dns_domain_len = 0;
	dp_intf->track_src_port = 0;
	dp_intf->track_dest_port = 0;
	dp_intf->track_dest_ipv4 = 0;
}

void dp_reset_all_intfs_connectivity_stats(struct wlan_dp_psoc_context *dp_ctx)
{
	struct wlan_dp_intf *dp_intf = NULL;

	qdf_spin_lock_bh(&dp_ctx->intf_list_lock);
	for (dp_get_front_intf_no_lock(dp_ctx, &dp_intf); dp_intf;
	     dp_get_next_intf_no_lock(dp_ctx, dp_intf, &dp_intf)) {
		dp_clear_tx_rx_connectivity_stats(dp_intf);
	}
	qdf_spin_unlock_bh(&dp_ctx->intf_list_lock);
}

void
dp_tx_rx_collect_connectivity_stats_info(qdf_nbuf_t nbuf, void *context,
		enum connectivity_stats_pkt_status action, uint8_t *pkt_type)
{
	uint32_t pkt_type_bitmap;
	struct wlan_dp_link *dp_link = (struct wlan_dp_link *)context;
	struct wlan_dp_intf *dp_intf = dp_link->dp_intf;

	/* ARP tracking is done already. */
	pkt_type_bitmap = dp_intf->pkt_type_bitmap;

	pkt_type_bitmap &= ~dp_intf->dp_ctx->arp_connectivity_map;

	if (!pkt_type_bitmap)
		return;

	switch (action) {
	case PKT_TYPE_REQ:
	case PKT_TYPE_TX_HOST_FW_SENT:
		if (qdf_nbuf_is_icmp_pkt(nbuf)) {
			if (qdf_nbuf_data_is_icmpv4_req(nbuf) &&
			    dp_intf->track_dest_ipv4 ==
			    qdf_nbuf_get_icmpv4_tgt_ip(nbuf)) {
				*pkt_type = DP_CONNECTIVITY_CHECK_SET_ICMPV4;
				if (action == PKT_TYPE_REQ) {
					++dp_intf->dp_stats.icmpv4_stats.
							tx_icmpv4_req_count;
					dp_info("ICMPv4 Req packet");
				} else
					/* host receives tx completion */
					++dp_intf->dp_stats.icmpv4_stats.
						tx_host_fw_sent;
			}
		} else if (qdf_nbuf_is_ipv4_tcp_pkt(nbuf)) {
			if (qdf_nbuf_data_is_tcp_syn(nbuf) &&
			    dp_intf->track_dest_port ==
			    qdf_nbuf_data_get_tcp_dst_port(nbuf)) {
				*pkt_type = DP_CONNECTIVITY_CHECK_SET_TCP_SYN;
				if (action == PKT_TYPE_REQ) {
					++dp_intf->dp_stats.tcp_stats.
							tx_tcp_syn_count;
					dp_info("TCP Syn packet");
				} else {
					/* host receives tx completion */
					++dp_intf->dp_stats.tcp_stats.
							tx_tcp_syn_host_fw_sent;
				}
			} else if ((dp_intf->dp_stats.tcp_stats.
				    is_tcp_syn_ack_rcv || dp_intf->dp_stats.
					tcp_stats.is_tcp_ack_sent) &&
				   qdf_nbuf_data_is_tcp_ack(nbuf) &&
				   (dp_intf->track_dest_port ==
				    qdf_nbuf_data_get_tcp_dst_port(nbuf))) {
				*pkt_type = DP_CONNECTIVITY_CHECK_SET_TCP_ACK;
				if (action == PKT_TYPE_REQ &&
					dp_intf->dp_stats.tcp_stats.
							is_tcp_syn_ack_rcv) {
					++dp_intf->dp_stats.tcp_stats.
							tx_tcp_ack_count;
					dp_intf->dp_stats.tcp_stats.
						is_tcp_syn_ack_rcv = false;
					dp_intf->dp_stats.tcp_stats.
						is_tcp_ack_sent = true;
					dp_info("TCP Ack packet");
				} else if (action == PKT_TYPE_TX_HOST_FW_SENT &&
					dp_intf->dp_stats.tcp_stats.
							is_tcp_ack_sent) {
					/* host receives tx completion */
					++dp_intf->dp_stats.tcp_stats.
							tx_tcp_ack_host_fw_sent;
					dp_intf->dp_stats.tcp_stats.
							is_tcp_ack_sent = false;
				}
			}
		} else if (qdf_nbuf_is_ipv4_udp_pkt(nbuf)) {
			if (qdf_nbuf_data_is_dns_query(nbuf) &&
			    dp_tx_rx_is_dns_domain_name_match(nbuf, dp_intf)) {
				*pkt_type = DP_CONNECTIVITY_CHECK_SET_DNS;
				if (action == PKT_TYPE_REQ) {
					++dp_intf->dp_stats.dns_stats.
							tx_dns_req_count;
					dp_info("DNS query packet");
				} else
					/* host receives tx completion */
					++dp_intf->dp_stats.dns_stats.
								tx_host_fw_sent;
			}
		}
		break;

	case PKT_TYPE_RSP:
		if (qdf_nbuf_is_icmp_pkt(nbuf)) {
			if (qdf_nbuf_data_is_icmpv4_rsp(nbuf) &&
			    (dp_intf->track_dest_ipv4 ==
					qdf_nbuf_get_icmpv4_src_ip(nbuf))) {
				++dp_intf->dp_stats.icmpv4_stats.
							rx_icmpv4_rsp_count;
				*pkt_type =
				DP_CONNECTIVITY_CHECK_SET_ICMPV4;
				dp_info("ICMPv4 resp packet");
			}
		} else if (qdf_nbuf_is_ipv4_tcp_pkt(nbuf)) {
			if (qdf_nbuf_data_is_tcp_syn_ack(nbuf) &&
			    (dp_intf->track_dest_port ==
					qdf_nbuf_data_get_tcp_src_port(nbuf))) {
				++dp_intf->dp_stats.tcp_stats.
							rx_tcp_syn_ack_count;
				dp_intf->dp_stats.tcp_stats.
					is_tcp_syn_ack_rcv = true;
				*pkt_type =
				DP_CONNECTIVITY_CHECK_SET_TCP_SYN_ACK;
				dp_info("TCP Syn ack packet");
			}
		} else if (qdf_nbuf_is_ipv4_udp_pkt(nbuf)) {
			if (qdf_nbuf_data_is_dns_response(nbuf) &&
			    dp_tx_rx_is_dns_domain_name_match(nbuf, dp_intf)) {
				++dp_intf->dp_stats.dns_stats.
							rx_dns_rsp_count;
				*pkt_type = DP_CONNECTIVITY_CHECK_SET_DNS;
				dp_info("DNS resp packet");
			}
		}
		break;

	case PKT_TYPE_TX_DROPPED:
		switch (*pkt_type) {
		case DP_CONNECTIVITY_CHECK_SET_ICMPV4:
			++dp_intf->dp_stats.icmpv4_stats.tx_dropped;
			dp_info("ICMPv4 Req packet dropped");
			break;
		case DP_CONNECTIVITY_CHECK_SET_TCP_SYN:
			++dp_intf->dp_stats.tcp_stats.tx_tcp_syn_dropped;
			dp_info("TCP syn packet dropped");
			break;
		case DP_CONNECTIVITY_CHECK_SET_TCP_ACK:
			++dp_intf->dp_stats.tcp_stats.tx_tcp_ack_dropped;
			dp_info("TCP ack packet dropped");
			break;
		case DP_CONNECTIVITY_CHECK_SET_DNS:
			++dp_intf->dp_stats.dns_stats.tx_dropped;
			dp_info("DNS query packet dropped");
			break;
		default:
			break;
		}
		break;
	case PKT_TYPE_RX_DELIVERED:
		switch (*pkt_type) {
		case DP_CONNECTIVITY_CHECK_SET_ICMPV4:
			++dp_intf->dp_stats.icmpv4_stats.rx_delivered;
			break;
		case DP_CONNECTIVITY_CHECK_SET_TCP_SYN_ACK:
			++dp_intf->dp_stats.tcp_stats.rx_delivered;
			break;
		case DP_CONNECTIVITY_CHECK_SET_DNS:
			++dp_intf->dp_stats.dns_stats.rx_delivered;
			break;
		default:
			break;
		}
		break;
	case PKT_TYPE_RX_REFUSED:
		switch (*pkt_type) {
		case DP_CONNECTIVITY_CHECK_SET_ICMPV4:
			++dp_intf->dp_stats.icmpv4_stats.rx_refused;
			break;
		case DP_CONNECTIVITY_CHECK_SET_TCP_SYN_ACK:
			++dp_intf->dp_stats.tcp_stats.rx_refused;
			break;
		case DP_CONNECTIVITY_CHECK_SET_DNS:
			++dp_intf->dp_stats.dns_stats.rx_refused;
			break;
		default:
			break;
		}
		break;
	case PKT_TYPE_TX_ACK_CNT:
		switch (*pkt_type) {
		case DP_CONNECTIVITY_CHECK_SET_ICMPV4:
			++dp_intf->dp_stats.icmpv4_stats.tx_ack_cnt;
			break;
		case DP_CONNECTIVITY_CHECK_SET_TCP_SYN:
			++dp_intf->dp_stats.tcp_stats.tx_tcp_syn_ack_cnt;
			break;
		case DP_CONNECTIVITY_CHECK_SET_TCP_ACK:
			++dp_intf->dp_stats.tcp_stats.tx_tcp_ack_ack_cnt;
			break;
		case DP_CONNECTIVITY_CHECK_SET_DNS:
			++dp_intf->dp_stats.dns_stats.tx_ack_cnt;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
}

/**
 * dp_get_transmit_mac_addr() - Get the mac address to validate the xmit
 * @dp_link: DP link handle
 * @nbuf: The network buffer
 * @mac_addr_tx_allowed: The mac address to be filled
 *
 * Return: None
 */
static
void dp_get_transmit_mac_addr(struct wlan_dp_link *dp_link,
			      qdf_nbuf_t nbuf,
			      struct qdf_mac_addr *mac_addr_tx_allowed)
{
	struct wlan_dp_intf *dp_intf = dp_link->dp_intf;
	bool is_mc_bc_addr = false;
	enum nan_datapath_state state;

	/* Check for VDEV validity before accessing it. Since VDEV references
	 * are not taken in the per-packet path, there is a chance of the VDEV
	 * getting deleted in a parallel context. Because the DP VDEV object is
	 * protected by dp_intf::num_active_task, the chance of the VDEV object
	 * getting deleted while executing dp_start_xmit() is slim. So, a
	 * simple VDEV NULL check should be sufficient to handle the case of
	 * the VDEV getting destroyed first, followed by dp_start_xmit().
	 */
	if (!dp_link->vdev)
		return;

	switch (dp_intf->device_mode) {
	case QDF_NDI_MODE:
		state = wlan_nan_get_ndi_state(dp_link->vdev);
		if (state == NAN_DATA_NDI_CREATED_STATE ||
		    state == NAN_DATA_CONNECTED_STATE ||
		    state == NAN_DATA_CONNECTING_STATE ||
		    state == NAN_DATA_PEER_CREATE_STATE) {
			if (QDF_NBUF_CB_GET_IS_BCAST(nbuf) ||
			    QDF_NBUF_CB_GET_IS_MCAST(nbuf))
				is_mc_bc_addr = true;
			if (is_mc_bc_addr)
				qdf_copy_macaddr(mac_addr_tx_allowed,
						 &dp_intf->mac_addr);
			else
				qdf_copy_macaddr(mac_addr_tx_allowed,
				(struct qdf_mac_addr *)qdf_nbuf_data(nbuf));
		}
		break;
	case QDF_STA_MODE:
	case QDF_P2P_CLIENT_MODE:
		if (wlan_cm_is_vdev_active(dp_link->vdev))
			qdf_copy_macaddr(mac_addr_tx_allowed,
					 &dp_link->conn_info.bssid);
		break;
	default:
		break;
	}
}

#ifdef HANDLE_BROADCAST_EAPOL_TX_FRAME
/**
 * dp_fix_broadcast_eapol() - Fix broadcast EAPOL
 * @dp_link: pointer to dp link
 * @nbuf: pointer to nbuf
 *
 * Override the DA of a broadcast EAPOL frame with the BSSID address.
 *
 * Return: None
 */
static void dp_fix_broadcast_eapol(struct wlan_dp_link *dp_link,
				   qdf_nbuf_t nbuf)
{
	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
	unsigned char *ap_mac_addr =
		&dp_link->conn_info.bssid.bytes[0];

	if (qdf_unlikely((QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) ==
			  QDF_NBUF_CB_PACKET_TYPE_EAPOL) &&
			 QDF_NBUF_CB_GET_IS_BCAST(nbuf))) {
		dp_debug("SA: "QDF_MAC_ADDR_FMT " override DA: "QDF_MAC_ADDR_FMT " with AP mac address "QDF_MAC_ADDR_FMT,
			  QDF_MAC_ADDR_REF(&eth_hdr->ether_shost[0]),
			  QDF_MAC_ADDR_REF(&eth_hdr->ether_dhost[0]),
			  QDF_MAC_ADDR_REF(ap_mac_addr));

		qdf_mem_copy(&eth_hdr->ether_dhost, ap_mac_addr,
			     QDF_MAC_ADDR_SIZE);
	}
}
#else
static void dp_fix_broadcast_eapol(struct wlan_dp_link *dp_link,
				   qdf_nbuf_t nbuf)
{
}
#endif /* HANDLE_BROADCAST_EAPOL_TX_FRAME */

#ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
/**
 * dp_mark_icmp_req_to_fw() - Mark the ICMP request at a certain time interval
 *			       to be sent to the FW.
 * @dp_ctx: Global dp context
 * @nbuf: packet to be transmitted
 *
 * This func sets the "to_fw" flag in the packet context block, if the
 * current packet is an ICMP request packet. This marking is done at a
 * specific time interval, unless the INI value indicates to disable/enable
 * this for all frames.
 *
 * Return: none
 */
static void dp_mark_icmp_req_to_fw(struct wlan_dp_psoc_context *dp_ctx,
				   qdf_nbuf_t nbuf)
{
	uint64_t curr_time, time_delta;
	int time_interval_ms = dp_ctx->dp_cfg.icmp_req_to_fw_mark_interval;
	static uint64_t prev_marked_icmp_time;

	if (!dp_ctx->dp_cfg.icmp_req_to_fw_mark_interval)
		return;

	if ((qdf_nbuf_get_icmp_subtype(nbuf) != QDF_PROTO_ICMP_REQ) &&
	    (qdf_nbuf_get_icmpv6_subtype(nbuf) != QDF_PROTO_ICMPV6_REQ))
		return;

	/* Mark all ICMP requests to be sent to the FW */
	if (time_interval_ms == WLAN_CFG_ICMP_REQ_TO_FW_MARK_ALL)
		QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf) = 1;

	/* For fragmented IPv4 ICMP frames,
	 * only mark the last segment once to the FW
	 */
	if (qdf_nbuf_is_ipv4_pkt(nbuf) &&
	    qdf_nbuf_is_ipv4_fragment(nbuf))
		return;

	curr_time = qdf_get_log_timestamp();
	time_delta = curr_time - prev_marked_icmp_time;
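	/*
	 * qdf_get_log_timestamp() ticks every 10 us, so convert the
	 * configured interval from ms to timestamp cycles (x100 per ms).
	 */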
	if (time_delta >= (time_interval_ms *
			   QDF_LOG_TIMESTAMP_CYCLES_PER_10_US * 100)) {
		QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf) = 1;
		prev_marked_icmp_time = curr_time;
	}
}
#else
static void dp_mark_icmp_req_to_fw(struct wlan_dp_psoc_context *dp_ctx,
				   qdf_nbuf_t nbuf)
{
}
#endif

#ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
void wlan_dp_pkt_add_timestamp(struct wlan_dp_intf *dp_intf,
			       enum qdf_pkt_timestamp_index index,
			       qdf_nbuf_t nbuf)
{
	struct wlan_dp_psoc_callbacks *dp_ops;

	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled())) {
		uint64_t tsf_time;

		dp_ops = &dp_intf->dp_ctx->dp_ops;
		dp_ops->dp_get_tsf_time(dp_intf->dev,
					qdf_get_log_timestamp(),
					&tsf_time);
		qdf_add_dp_pkt_timestamp(nbuf, index, tsf_time);
	}
}
#endif

QDF_STATUS
dp_start_xmit(struct wlan_dp_link *dp_link, qdf_nbuf_t nbuf)
{
	struct wlan_dp_intf *dp_intf = dp_link->dp_intf;
	struct wlan_dp_psoc_context *dp_ctx = dp_intf->dp_ctx;
	struct dp_tx_rx_stats *stats;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
	bool is_arp = false;
	bool is_eapol = false;
	bool is_dhcp = false;
	uint8_t pkt_type;
	struct qdf_mac_addr mac_addr_tx_allowed = QDF_MAC_ADDR_ZERO_INIT;
	int cpu = qdf_get_smp_processor_id();

	stats = &dp_intf->dp_stats.tx_rx_stats;
	++stats->per_cpu[cpu].tx_called;
	stats->cont_txtimeout_cnt = 0;

	if (qdf_unlikely(cds_is_driver_transitioning())) {
		dp_err_rl("driver is transitioning, drop pkt");
		goto drop_pkt;
	}

	if (qdf_unlikely(dp_ctx->is_suspend)) {
		dp_err_rl("Device is system suspended, drop pkt");
		goto drop_pkt;
	}

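	/* Request a Tx completion notification for this frame */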
	QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(nbuf) = 1;

	pkt_type = QDF_NBUF_CB_GET_PACKET_TYPE(nbuf);

	if (pkt_type == QDF_NBUF_CB_PACKET_TYPE_ARP) {
		if (qdf_nbuf_data_is_arp_req(nbuf) &&
		    (dp_intf->track_arp_ip == qdf_nbuf_get_arp_tgt_ip(nbuf))) {
			is_arp = true;
			++dp_intf->dp_stats.arp_stats.tx_arp_req_count;
			dp_info("ARP packet");
		}
	} else if (pkt_type == QDF_NBUF_CB_PACKET_TYPE_EAPOL) {
		subtype = qdf_nbuf_get_eapol_subtype(nbuf);
		if (subtype == QDF_PROTO_EAPOL_M2) {
			++dp_intf->dp_stats.eapol_stats.eapol_m2_count;
			is_eapol = true;
		} else if (subtype == QDF_PROTO_EAPOL_M4) {
			++dp_intf->dp_stats.eapol_stats.eapol_m4_count;
			is_eapol = true;
		}
	} else if (pkt_type == QDF_NBUF_CB_PACKET_TYPE_DHCP) {
		subtype = qdf_nbuf_get_dhcp_subtype(nbuf);
		if (subtype == QDF_PROTO_DHCP_DISCOVER) {
			++dp_intf->dp_stats.dhcp_stats.dhcp_dis_count;
			is_dhcp = true;
		} else if (subtype == QDF_PROTO_DHCP_REQUEST) {
			++dp_intf->dp_stats.dhcp_stats.dhcp_req_count;
			is_dhcp = true;
		}
	} else if ((pkt_type == QDF_NBUF_CB_PACKET_TYPE_ICMP) ||
		   (pkt_type == QDF_NBUF_CB_PACKET_TYPE_ICMPv6)) {
		dp_mark_icmp_req_to_fw(dp_ctx, nbuf);
	}

	wlan_dp_pkt_add_timestamp(dp_intf, QDF_PKT_TX_DRIVER_ENTRY, nbuf);

	/* track connectivity stats */
	if (dp_intf->pkt_type_bitmap)
		dp_tx_rx_collect_connectivity_stats_info(nbuf, dp_link,
							 PKT_TYPE_REQ,
							 &pkt_type);

	dp_get_transmit_mac_addr(dp_link, nbuf, &mac_addr_tx_allowed);
	if (qdf_is_macaddr_zero(&mac_addr_tx_allowed)) {
		dp_info_rl("tx not allowed, transmit operation suspended");
		goto drop_pkt;
	}

	dp_get_tx_resource(dp_link, &mac_addr_tx_allowed);

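	/*
	 * Orphan the skb (likely detaching it from the source socket's
	 * accounting) unless IPA owns it.
	 */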
	if (!qdf_nbuf_ipa_owned_get(nbuf)) {
		nbuf = dp_nbuf_orphan(dp_intf, nbuf);
		if (!nbuf)
			goto drop_pkt_accounting;
	}

	/*
	 * Add SKB to internal tracking table before further processing
	 * in WLAN driver.
	 */
	qdf_net_buf_debug_acquire_skb(nbuf, __FILE__, __LINE__);

	qdf_net_stats_add_tx_bytes(&dp_intf->stats, qdf_nbuf_len(nbuf));

	if (qdf_nbuf_is_tso(nbuf)) {
		qdf_net_stats_add_tx_pkts(&dp_intf->stats,
					  qdf_nbuf_get_tso_num_seg(nbuf));
	} else {
		qdf_net_stats_add_tx_pkts(&dp_intf->stats, 1);
		dp_ctx->no_tx_offload_pkt_cnt++;
	}

	dp_event_eapol_log(nbuf, QDF_TX);
	QDF_NBUF_CB_TX_PACKET_TRACK(nbuf) = QDF_NBUF_TX_PKT_DATA_TRACK;
	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_DP);

	qdf_dp_trace_set_track(nbuf, QDF_TX);

	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_TX_PACKET_PTR_RECORD,
			     QDF_TRACE_DEFAULT_PDEV_ID,
			     qdf_nbuf_data_addr(nbuf),
			     sizeof(qdf_nbuf_data(nbuf)),
			     QDF_TX));

	if (!dp_intf_is_tx_allowed(nbuf, dp_link->link_id, soc,
				   mac_addr_tx_allowed.bytes)) {
		dp_info("Tx not allowed for sta:" QDF_MAC_ADDR_FMT,
			QDF_MAC_ADDR_REF(mac_addr_tx_allowed.bytes));
		goto drop_pkt_and_release_nbuf;
	}

	/* Check whether the nbuf needs to be linearized, e.g. non-linear
	 * UDP data.
	 */
	if (dp_nbuf_nontso_linearize(nbuf) != QDF_STATUS_SUCCESS) {
		dp_err_rl("nbuf %pK linearize failed, drop the pkt", nbuf);
		goto drop_pkt_and_release_nbuf;
	}

	/*
	 * If a transmit function is not registered, drop packet
	 */
	if (!dp_intf->txrx_ops.tx.tx) {
		dp_err_rl("TX function not registered by the data path");
		goto drop_pkt_and_release_nbuf;
	}

	dp_fix_broadcast_eapol(dp_link, nbuf);

	if (dp_intf->txrx_ops.tx.tx(soc, dp_link->link_id, nbuf)) {
		dp_debug_rl("Failed to send packet from adapter %u",
			    dp_link->link_id);
		goto drop_pkt_and_release_nbuf;
	}

	return QDF_STATUS_SUCCESS;

drop_pkt_and_release_nbuf:
	qdf_net_buf_debug_release_skb(nbuf);
drop_pkt:

	/* track connectivity stats */
	if (dp_intf->pkt_type_bitmap)
		dp_tx_rx_collect_connectivity_stats_info(nbuf, dp_link,
							 PKT_TYPE_TX_DROPPED,
							 &pkt_type);
	qdf_dp_trace_data_pkt(nbuf, QDF_TRACE_DEFAULT_PDEV_ID,
			      QDF_DP_TRACE_DROP_PACKET_RECORD, 0,
			      QDF_TX);
	qdf_nbuf_kfree(nbuf);

drop_pkt_accounting:

	qdf_net_stats_inc_tx_dropped(&dp_intf->stats);
	++stats->per_cpu[cpu].tx_dropped;
	if (is_arp) {
		++dp_intf->dp_stats.arp_stats.tx_dropped;
		dp_info_rl("ARP packet dropped");
	} else if (is_eapol) {
		++dp_intf->dp_stats.eapol_stats.
				tx_dropped[subtype - QDF_PROTO_EAPOL_M1];
	} else if (is_dhcp) {
		++dp_intf->dp_stats.dhcp_stats.
				tx_dropped[subtype - QDF_PROTO_DHCP_DISCOVER];
	}

	return QDF_STATUS_E_FAILURE;
}

void dp_tx_timeout(struct wlan_dp_intf *dp_intf)
{
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	u64 diff_time;

	cdp_dump_flow_pool_info(soc);

	++dp_intf->dp_stats.tx_rx_stats.tx_timeout_cnt;
	++dp_intf->dp_stats.tx_rx_stats.cont_txtimeout_cnt;

	diff_time = qdf_system_ticks() -
		dp_intf->dp_stats.tx_rx_stats.last_txtimeout;

	if ((dp_intf->dp_stats.tx_rx_stats.cont_txtimeout_cnt > 1) &&
	    (diff_time > (DP_TX_TIMEOUT * 2))) {
		/*
		 * When no traffic is running, a tx timeout may happen
		 * once and the system may recover later; the continuous
		 * tx timeout count then has to be reset, as it only gets
		 * updated while traffic is running. If this count reaches
		 * the threshold over a period of time, the host would
		 * trigger a false subsystem restart. In a genuine timeout
		 * case, the OS calls the tx timeout handler back-to-back
		 * at intervals of DP_TX_TIMEOUT. So if the previous tx
		 * timeout occurred more than twice DP_TX_TIMEOUT ago, the
		 * host has likely recovered from the data stall.
		 */
		dp_intf->dp_stats.tx_rx_stats.cont_txtimeout_cnt = 0;
		dp_info("Reset continuous tx timeout stat");
	}

	dp_intf->dp_stats.tx_rx_stats.last_txtimeout = qdf_system_ticks();

	if (dp_intf->dp_stats.tx_rx_stats.cont_txtimeout_cnt >
	    DP_TX_STALL_THRESHOLD) {
		dp_err("Data stall due to continuous TX timeouts");
		dp_intf->dp_stats.tx_rx_stats.cont_txtimeout_cnt = 0;

		if (dp_is_data_stall_event_enabled(DP_HOST_STA_TX_TIMEOUT))
			cdp_post_data_stall_event(soc,
					  DATA_STALL_LOG_INDICATOR_HOST_DRIVER,
					  DATA_STALL_LOG_HOST_STA_TX_TIMEOUT,
					  OL_TXRX_PDEV_ID, 0xFF,
					  DATA_STALL_LOG_RECOVERY_TRIGGER_PDR);
	}
}

void dp_sta_notify_tx_comp_cb(qdf_nbuf_t nbuf, void *ctx, uint16_t flag)
{
	struct wlan_dp_link *dp_link = ctx;
	struct wlan_dp_intf *dp_intf = dp_link->dp_intf;
	enum qdf_proto_subtype subtype;
	struct qdf_mac_addr *dest_mac_addr;
	QDF_STATUS status;

	if (is_dp_intf_valid(dp_intf))
		return;

	dest_mac_addr = (struct qdf_mac_addr *)qdf_nbuf_data(nbuf);

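	/*
	 * 'flag' carries QDF_TX_RX_STATUS_* bits: DOWNLOAD_SUCC indicates
	 * the FW accepted the frame; OK indicates it was acked by the peer.
	 */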
	switch (QDF_NBUF_CB_GET_PACKET_TYPE(nbuf)) {
	case QDF_NBUF_CB_PACKET_TYPE_ARP:
		if (flag & BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC))
			++dp_intf->dp_stats.arp_stats.
				tx_host_fw_sent;
		if (flag & BIT(QDF_TX_RX_STATUS_OK))
			++dp_intf->dp_stats.arp_stats.tx_ack_cnt;
		break;
	case QDF_NBUF_CB_PACKET_TYPE_EAPOL:
		subtype = qdf_nbuf_get_eapol_subtype(nbuf);
		if (!(flag & BIT(QDF_TX_RX_STATUS_OK)) &&
		    subtype != QDF_PROTO_INVALID &&
		    subtype <= QDF_PROTO_EAPOL_M4)
			++dp_intf->dp_stats.eapol_stats.
				tx_noack_cnt[subtype - QDF_PROTO_EAPOL_M1];
		break;
	case QDF_NBUF_CB_PACKET_TYPE_DHCP:
		subtype = qdf_nbuf_get_dhcp_subtype(nbuf);
		if (!(flag & BIT(QDF_TX_RX_STATUS_OK)) &&
		    subtype != QDF_PROTO_INVALID &&
		    subtype <= QDF_PROTO_DHCP_ACK)
			++dp_intf->dp_stats.dhcp_stats.
				tx_noack_cnt[subtype - QDF_PROTO_DHCP_DISCOVER];
		break;
	default:
		break;
	}

	/* Take a TDLS vdev reference, since this may be a TDLS connection */
	status = wlan_objmgr_vdev_try_get_ref(dp_link->vdev, WLAN_TDLS_SB_ID);
	if (QDF_IS_STATUS_SUCCESS(status)) {
		wlan_tdls_update_tx_pkt_cnt(dp_link->vdev, dest_mac_addr);
		wlan_objmgr_vdev_release_ref(dp_link->vdev, WLAN_TDLS_SB_ID);
	}
}

#ifdef FEATURE_MONITOR_MODE_SUPPORT
QDF_STATUS dp_mon_rx_packet_cbk(void *context, qdf_nbuf_t rxbuf)
{
	struct wlan_dp_intf *dp_intf;
	struct wlan_dp_link *dp_link;
	QDF_STATUS status;
	qdf_nbuf_t nbuf;
	qdf_nbuf_t nbuf_next;
	unsigned int cpu_index;
	struct dp_tx_rx_stats *stats;
	enum dp_nbuf_push_type type;

	/* Sanity check on inputs */
	if ((!context) || (!rxbuf)) {
		dp_err_rl("Null params being passed");
		return QDF_STATUS_E_FAILURE;
	}

	dp_link = (struct wlan_dp_link *)context;
	dp_intf = dp_link->dp_intf;
	if (!dp_intf) {
		dp_err_rl("dp_intf is NULL for dp_link %pK", dp_link);
		return QDF_STATUS_E_FAILURE;
	}

	cpu_index = qdf_get_cpu();
	stats = &dp_intf->dp_stats.tx_rx_stats;

	/* walk the chain until all are processed */
	nbuf = rxbuf;
	while (nbuf) {
		nbuf_next = qdf_nbuf_next(nbuf);
		qdf_nbuf_set_dev(nbuf, dp_intf->dev);

		++stats->per_cpu[cpu_index].rx_packets;
		qdf_net_stats_add_rx_pkts(&dp_intf->stats, 1);
		qdf_net_stats_add_rx_bytes(&dp_intf->stats,
					   qdf_nbuf_len(nbuf));

		/* Remove SKB from internal tracking table before submitting
		 * it to stack
		 */
		qdf_net_buf_debug_release_skb(nbuf);

		/*
		 * If this is not the last packet on the chain, just put the
		 * packet into the backlog queue without scheduling the RX
		 * softirq.
		 */
		if (qdf_nbuf_next(nbuf)) {
			status = dp_intf->dp_ctx->dp_ops.dp_nbuf_push_pkt(nbuf,
							DP_NBUF_PUSH_SIMPLE);
		} else {
			/*
			 * This is the last packet on the chain,
			 * schedule the RX softirq.
			 */
			type = qdf_in_atomic() ? DP_NBUF_PUSH_NAPI :
						 DP_NBUF_PUSH_BH_DISABLE;
			status = dp_intf->dp_ctx->dp_ops.dp_nbuf_push_pkt(nbuf,
							type);
		}

		if (QDF_IS_STATUS_SUCCESS(status))
			++stats->per_cpu[cpu_index].rx_delivered;
		else
			++stats->per_cpu[cpu_index].rx_refused;

		nbuf = nbuf_next;
	}

	return QDF_STATUS_SUCCESS;
}

void dp_monitor_set_rx_monitor_cb(struct ol_txrx_ops *txrx,
				  ol_txrx_rx_mon_fp rx_monitor_cb)
{
	txrx->rx.mon = rx_monitor_cb;
}

void dp_rx_monitor_callback(ol_osif_vdev_handle context,
			    qdf_nbuf_t rxbuf,
			    void *rx_status)
{
	dp_mon_rx_packet_cbk(context, rxbuf);
}
#endif

/**
 * dp_is_rx_wake_lock_needed() - check if wake lock is needed
 * @nbuf: pointer to sk_buff
 *
 * RX wake lock is needed for:
 * 1) Unicast data packet OR
 * 2) Local ARP data packet
 *
 * Return: true if wake lock is needed or false otherwise.
 */
static bool dp_is_rx_wake_lock_needed(qdf_nbuf_t nbuf)
{
	if ((!qdf_nbuf_pkt_type_is_mcast(nbuf) &&
	     !qdf_nbuf_pkt_type_is_bcast(nbuf)) ||
	    qdf_nbuf_is_arp_local(nbuf))
		return true;

	return false;
}

#ifdef RECEIVE_OFFLOAD
/**
 * dp_resolve_rx_ol_mode() - Resolve Rx offload method, LRO or GRO
 * @dp_ctx: pointer to DP psoc Context
 *
 * Return: None
 */
static void dp_resolve_rx_ol_mode(struct wlan_dp_psoc_context *dp_ctx)
{
	void *soc;

	soc = cds_get_context(QDF_MODULE_ID_SOC);

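	/*
	 * XNOR of the two cfg flags: either both are set (an invalid
	 * combination) or both are clear (Rx offload disabled).
	 */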
	if (!(cdp_cfg_get(soc, cfg_dp_lro_enable) ^
	    cdp_cfg_get(soc, cfg_dp_gro_enable))) {
		if (cdp_cfg_get(soc, cfg_dp_lro_enable) &&
		    cdp_cfg_get(soc, cfg_dp_gro_enable))
			dp_info("Can't enable both LRO and GRO, disabling Rx offload");
		else
			dp_info("LRO and GRO both are disabled");
		dp_ctx->ol_enable = 0;
	} else if (cdp_cfg_get(soc, cfg_dp_lro_enable)) {
		dp_info("Rx offload: LRO is enabled");
		dp_ctx->ol_enable = CFG_LRO_ENABLED;
	} else {
		dp_info("Rx offload: GRO is enabled");
		dp_ctx->ol_enable = CFG_GRO_ENABLED;
	}
}

#ifdef WLAN_FEATURE_DYNAMIC_RX_AGGREGATION
/**
 * dp_gro_rx_bh_disable() - GRO RX/flush function.
 * @dp_intf: DP interface pointer
 * @napi_to_use: napi to be used to give packets to the stack, gro flush
 * @nbuf: pointer to n/w buff
 *
 * Function calls napi_gro_receive for the skb. If the skb indicates that a
 * flush needs to be done (set by the lower DP layer), the function also calls
 * napi_gro_flush. Local softirqs are disabled (and later enabled) while making
 * napi_gro_* calls.
 *
 * Return: QDF_STATUS_SUCCESS if not dropped by napi_gro_receive or
 *	   QDF error code.
 */
static QDF_STATUS dp_gro_rx_bh_disable(struct wlan_dp_intf *dp_intf,
				       qdf_napi_struct *napi_to_use,
				       qdf_nbuf_t nbuf)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct wlan_dp_psoc_context *dp_ctx = dp_intf->dp_ctx;
	uint32_t rx_aggregation;
	uint8_t rx_ctx_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
	uint8_t low_tput_force_flush = 0;
	int32_t gro_disallowed;

	rx_aggregation = qdf_atomic_read(&dp_ctx->dp_agg_param.rx_aggregation);
	gro_disallowed = qdf_atomic_read(&dp_intf->gro_disallowed);

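	/*
	 * Flush rather than aggregate when the bus is idle, Rx
	 * aggregation is dynamically disabled, or GRO is disallowed
	 * for this interface.
	 */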
	if (dp_get_current_throughput_level(dp_ctx) == PLD_BUS_WIDTH_IDLE ||
	    !rx_aggregation || gro_disallowed) {
		status = dp_ctx->dp_ops.dp_rx_napi_gro_flush(napi_to_use, nbuf,
						   &low_tput_force_flush);
		if (!low_tput_force_flush)
			dp_intf->dp_stats.tx_rx_stats.
					rx_gro_low_tput_flush++;
		if (!rx_aggregation)
			dp_ctx->dp_agg_param.gro_force_flush[rx_ctx_id] = 1;
		if (gro_disallowed)
			dp_intf->gro_flushed[rx_ctx_id] = 1;
	} else {
		status = dp_ctx->dp_ops.dp_rx_napi_gro_receive(napi_to_use,
							      nbuf);
	}

	return status;
}

#else /* WLAN_FEATURE_DYNAMIC_RX_AGGREGATION */

/**
 * dp_gro_rx_bh_disable() - GRO RX/flush function.
 * @dp_intf: DP interface pointer
 * @napi_to_use: napi to be used to give packets to the stack, gro flush
 * @nbuf: pointer to nbuff
 *
 * Function calls napi_gro_receive for the skb. If the skb indicates that a
 * flush needs to be done (set by the lower DP layer), the function also calls
 * napi_gro_flush. Local softirqs are disabled (and later enabled) while making
 * napi_gro_* calls.
 *
 * Return: QDF_STATUS_SUCCESS if not dropped by napi_gro_receive or
 *	   QDF error code.
 */
static QDF_STATUS dp_gro_rx_bh_disable(struct wlan_dp_intf *dp_intf,
				       qdf_napi_struct *napi_to_use,
				       qdf_nbuf_t nbuf)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct wlan_dp_psoc_context *dp_ctx = dp_intf->dp_ctx;
	uint8_t low_tput_force_flush = 0;

	if (dp_get_current_throughput_level(dp_ctx) == PLD_BUS_WIDTH_IDLE) {
		status = dp_ctx->dp_ops.dp_rx_napi_gro_flush(napi_to_use, nbuf,
							&low_tput_force_flush);
		if (!low_tput_force_flush)
			dp_intf->dp_stats.tx_rx_stats.
					rx_gro_low_tput_flush++;
	} else {
		status = dp_ctx->dp_ops.dp_rx_napi_gro_receive(napi_to_use,
							      nbuf);
	}

	return status;
}
#endif /* WLAN_FEATURE_DYNAMIC_RX_AGGREGATION */

#if defined(FEATURE_LRO)
/**
 * dp_lro_rx() - Handle Rx processing via LRO
 * @dp_intf: pointer to DP interface
 * @nbuf: pointer to n/w buff
 *
 * Return: QDF_STATUS_SUCCESS if processed via LRO or non zero return code
 */
static inline QDF_STATUS
dp_lro_rx(struct wlan_dp_intf *dp_intf, qdf_nbuf_t nbuf)
{
	struct wlan_dp_psoc_context *dp_ctx = dp_intf->dp_ctx;

	return dp_ctx->dp_ops.dp_lro_rx_cb(dp_intf->dev, nbuf);
}

/**
 * dp_is_lro_enabled() - Is LRO enabled
 * @dp_ctx: DP psoc context
 *
 * This function checks if LRO is enabled in the DP context.
 *
 * Return: QDF_STATUS_SUCCESS if LRO is enabled, error code otherwise
 */
static inline QDF_STATUS
dp_is_lro_enabled(struct wlan_dp_psoc_context *dp_ctx)
{
	if (dp_ctx->ol_enable != CFG_LRO_ENABLED)
		return QDF_STATUS_E_NOSUPPORT;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_lro_set_reset(struct wlan_dp_intf *dp_intf, uint8_t enable_flag)
{
	struct wlan_dp_psoc_context *dp_ctx = dp_intf->dp_ctx;

	if ((dp_ctx->ol_enable != CFG_LRO_ENABLED) ||
	    (dp_intf->device_mode != QDF_STA_MODE)) {
		dp_info("LRO is already Disabled");
		return QDF_STATUS_E_INVAL;
	}

	if (enable_flag) {
		qdf_atomic_set(&dp_ctx->vendor_disable_lro_flag, 0);
	} else {
		/* Disable LRO, enable TCP delayed ack */
		qdf_atomic_set(&dp_ctx->vendor_disable_lro_flag, 1);
		dp_info("LRO Disabled");

		if (dp_ctx->dp_cfg.enable_tcp_delack) {
			struct wlan_rx_tp_data rx_tp_data;

			dp_info("Enable TCP delack as LRO is disabled");
			rx_tp_data.rx_tp_flags = TCP_DEL_ACK_IND;
			rx_tp_data.level =
				DP_BUS_BW_CFG(dp_ctx->dp_cfg.cur_rx_level);
			wlan_dp_update_tcp_rx_param(dp_ctx, &rx_tp_data);
			dp_ctx->en_tcp_delack_no_lro = 1;
		}
	}

	return QDF_STATUS_SUCCESS;
}
#else
static inline
QDF_STATUS dp_lro_rx(struct wlan_dp_intf *dp_intf,
		     qdf_nbuf_t nbuf)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static inline
QDF_STATUS dp_is_lro_enabled(struct wlan_dp_psoc_context *dp_ctx)
{
	return QDF_STATUS_E_NOSUPPORT;
}
#endif /* FEATURE_LRO */

/**
 * dp_gro_rx_thread() - Handle Rx processing via GRO for DP thread
 * @dp_intf: pointer to DP interface
 * @nbuf: pointer to n/w buff
 *
 * Return: QDF_STATUS_SUCCESS if processed via GRO or non zero return code
 */
static
QDF_STATUS dp_gro_rx_thread(struct wlan_dp_intf *dp_intf,
			    qdf_nbuf_t nbuf)
{
	qdf_napi_struct *napi_to_use = NULL;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;

	if (!dp_intf->dp_ctx->enable_dp_rx_threads) {
		dp_err_rl("gro not supported without DP RX thread!");
		return status;
	}

	napi_to_use =
		(qdf_napi_struct *)dp_rx_get_napi_context(cds_get_context(QDF_MODULE_ID_SOC),
				       QDF_NBUF_CB_RX_CTX_ID(nbuf));

	if (!napi_to_use) {
		dp_err_rl("no napi to use for GRO!");
		return status;
	}

	return dp_gro_rx_bh_disable(dp_intf, napi_to_use, nbuf);
}

/**
 * dp_gro_rx_legacy() - Handle Rx processing via GRO for Helium-based targets
 * @dp_intf: pointer to DP interface
 * @nbuf: pointer to n/w buf
 *
 * Supports GRO for station mode only
 *
 * Return: QDF_STATUS_SUCCESS if processed via GRO or non zero return code
 */
static
QDF_STATUS dp_gro_rx_legacy(struct wlan_dp_intf *dp_intf, qdf_nbuf_t nbuf)
{
	qdf_napi_struct *napi_to_use;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct wlan_dp_psoc_context *dp_ctx = dp_intf->dp_ctx;

	/* Only enabling it for STA mode, like LRO today */
	if (QDF_STA_MODE != dp_intf->device_mode)
		return QDF_STATUS_E_NOSUPPORT;

	if (qdf_atomic_read(&dp_ctx->disable_rx_ol_in_low_tput) ||
	    qdf_atomic_read(&dp_ctx->disable_rx_ol_in_concurrency))
		return QDF_STATUS_E_NOSUPPORT;

	napi_to_use = dp_ctx->dp_ops.dp_gro_rx_legacy_get_napi(nbuf,
						dp_ctx->enable_rxthread);
	if (!napi_to_use)
		goto out;

	status = dp_gro_rx_bh_disable(dp_intf, napi_to_use, nbuf);
out:

	return status;
}

/**
 * dp_register_rx_ol_cb() - Register LRO/GRO rx processing callbacks
 * @dp_ctx: pointer to dp_ctx
 * @wifi3_0_target: whether it is a Lithium/Beryllium arch based target or not
 *
 * Return: none
 */
static void dp_register_rx_ol_cb(struct wlan_dp_psoc_context *dp_ctx,
				 bool wifi3_0_target)
{
	if (!dp_ctx) {
		dp_err("DP context is NULL");
		return;
	}

	dp_ctx->en_tcp_delack_no_lro = 0;

	if (!dp_is_lro_enabled(dp_ctx)) {
		dp_ctx->dp_ops.dp_register_rx_offld_flush_cb(DP_RX_FLUSH_LRO);
		dp_ctx->receive_offload_cb = dp_lro_rx;
		dp_info("LRO is enabled");
	} else if (dp_ctx->ol_enable == CFG_GRO_ENABLED) {
		qdf_atomic_set(&dp_ctx->dp_agg_param.rx_aggregation, 1);
		if (wifi3_0_target) {
			/* no flush registration needed, it happens in DP thread */
			dp_ctx->receive_offload_cb = dp_gro_rx_thread;
		} else {
			/* Helium-based targets */
			if (dp_ctx->enable_rxthread)
				dp_ctx->dp_ops.dp_register_rx_offld_flush_cb(
							DP_RX_FLUSH_THREAD);
			else
				dp_ctx->dp_ops.dp_register_rx_offld_flush_cb(
							DP_RX_FLUSH_NAPI);
			dp_ctx->receive_offload_cb = dp_gro_rx_legacy;
		}
		dp_info("GRO is enabled");
	} else if (DP_BUS_BW_CFG(dp_ctx->dp_cfg.enable_tcp_delack)) {
		dp_ctx->en_tcp_delack_no_lro = 1;
		dp_info("TCP Del ACK is enabled");
	}
}

/**
 * dp_rx_ol_send_config() - Send RX offload configuration to FW
 * @dp_ctx: pointer to DP_ctx
 *
 * This function is only used for non-Lithium targets. Lithium-based targets
 * send the LRO configuration to the FW during vdev attach, implemented in
 * the common DP layer.
 *
 * Return: 0 on success, non zero on failure
 */
static QDF_STATUS dp_rx_ol_send_config(struct wlan_dp_psoc_context *dp_ctx)
{
	struct cdp_lro_hash_config lro_config = {0};
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	/*
	 * This enables flow steering and the Toeplitz hash, so enable it
	 * for either LRO or GRO processing.
	 */
	if (dp_ctx->dp_cfg.gro_enable ||
	    dp_ctx->dp_cfg.lro_enable) {
		lro_config.lro_enable = 1;
		lro_config.tcp_flag = QDF_TCPHDR_ACK;
		lro_config.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
					   QDF_TCPHDR_RST | QDF_TCPHDR_ACK |
					   QDF_TCPHDR_URG | QDF_TCPHDR_ECE |
					   QDF_TCPHDR_CWR;
	}

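	/* Seed the Toeplitz hash keys with random bytes */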
	qdf_get_random_bytes(lro_config.toeplitz_hash_ipv4,
			     (sizeof(lro_config.toeplitz_hash_ipv4[0]) *
			      LRO_IPV4_SEED_ARR_SZ));

	qdf_get_random_bytes(lro_config.toeplitz_hash_ipv6,
			     (sizeof(lro_config.toeplitz_hash_ipv6[0]) *
			      LRO_IPV6_SEED_ARR_SZ));

	status = dp_ctx->sb_ops.dp_lro_config_cmd(dp_ctx->psoc, &lro_config);
	dp_info("LRO Config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
		lro_config.lro_enable, lro_config.tcp_flag,
		lro_config.tcp_flag_mask);

	return status;
}

QDF_STATUS dp_rx_ol_init(struct wlan_dp_psoc_context *dp_ctx,
			 bool is_wifi3_0_target)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	dp_resolve_rx_ol_mode(dp_ctx);
	dp_register_rx_ol_cb(dp_ctx, is_wifi3_0_target);

	dp_info("ol init");
	if (!is_wifi3_0_target) {
		status = dp_rx_ol_send_config(dp_ctx);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_ctx->ol_enable = 0;
			dp_err("Failed to send LRO/GRO configuration! %u", status);
			return status;
		}
	}

	return QDF_STATUS_SUCCESS;
}

void dp_disable_rx_ol_for_low_tput(struct wlan_dp_psoc_context *dp_ctx,
				   bool disable)
{
	if (disable)
		qdf_atomic_set(&dp_ctx->disable_rx_ol_in_low_tput, 1);
	else
		qdf_atomic_set(&dp_ctx->disable_rx_ol_in_low_tput, 0);
}

#else /* RECEIVE_OFFLOAD */
void dp_disable_rx_ol_for_low_tput(struct wlan_dp_psoc_context *dp_ctx,
				   bool disable)
{
}
#endif /* RECEIVE_OFFLOAD */

#ifdef WLAN_FEATURE_TSF_PLUS_SOCK_TS
static inline void dp_tsf_timestamp_rx(struct wlan_dp_psoc_context *dp_ctx,
				       qdf_nbuf_t netbuf)
{
	dp_ctx->dp_ops.dp_tsf_timestamp_rx(dp_ctx->dp_ops.callback_ctx,
					   netbuf);
}
#else
static inline void dp_tsf_timestamp_rx(struct wlan_dp_psoc_context *dp_ctx,
				       qdf_nbuf_t netbuf)
{
}
#endif

QDF_STATUS
dp_rx_thread_gro_flush_ind_cbk(void *link_ctx, int rx_ctx_id)
{
	struct wlan_dp_link *dp_link = link_ctx;
	struct wlan_dp_intf *dp_intf;
	enum dp_rx_gro_flush_code gro_flush_code = DP_RX_GRO_NORMAL_FLUSH;

	if (qdf_unlikely((!dp_link) || (!dp_link->dp_intf) ||
			 (!dp_link->dp_intf->dp_ctx))) {
		dp_err("Null params being passed");
		return QDF_STATUS_E_FAILURE;
	}

	dp_intf = dp_link->dp_intf;
	if (dp_intf->runtime_disable_rx_thread)
		return QDF_STATUS_SUCCESS;

	if (dp_is_low_tput_gro_enable(dp_intf->dp_ctx)) {
		dp_intf->dp_stats.tx_rx_stats.rx_gro_flush_skip++;
		gro_flush_code = DP_RX_GRO_LOW_TPUT_FLUSH;
	}

	return dp_rx_gro_flush_ind(cds_get_context(QDF_MODULE_ID_SOC),
				   rx_ctx_id, gro_flush_code);
}

QDF_STATUS dp_rx_pkt_thread_enqueue_cbk(void *link_ctx,
					qdf_nbuf_t nbuf_list)
{
	struct wlan_dp_intf *dp_intf;
	struct wlan_dp_link *dp_link;
	uint8_t link_id;
	qdf_nbuf_t head_ptr;

	if (qdf_unlikely(!link_ctx || !nbuf_list)) {
		dp_err_rl("Null params being passed");
		return QDF_STATUS_E_FAILURE;
	}

	dp_link = (struct wlan_dp_link *)link_ctx;
	if (!is_dp_link_valid(dp_link))
		return QDF_STATUS_E_FAILURE;

	dp_intf = dp_link->dp_intf;
	if (dp_intf->runtime_disable_rx_thread &&
	    dp_intf->txrx_ops.rx.rx_stack)
		return dp_intf->txrx_ops.rx.rx_stack(dp_link, nbuf_list);

	link_id = dp_link->link_id;

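	/* Stamp each nbuf in the list with the link (vdev) id before enqueue */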
	head_ptr = nbuf_list;
	while (head_ptr) {
		qdf_nbuf_cb_update_vdev_id(head_ptr,
					   link_id);
		head_ptr = qdf_nbuf_next(head_ptr);
	}

	return dp_rx_enqueue_pkt(cds_get_context(QDF_MODULE_ID_SOC), nbuf_list);
}

#ifdef CONFIG_HL_SUPPORT
QDF_STATUS wlan_dp_rx_deliver_to_stack(struct wlan_dp_intf *dp_intf,
				       qdf_nbuf_t nbuf)
{
	struct wlan_dp_psoc_context *dp_ctx = dp_intf->dp_ctx;

	dp_intf->dp_stats.tx_rx_stats.rx_non_aggregated++;
	dp_ctx->no_rx_offload_pkt_cnt++;

	return dp_ctx->dp_ops.dp_nbuf_push_pkt(nbuf, DP_NBUF_PUSH_NI);
}
#else

#if defined(WLAN_SUPPORT_RX_FISA)
/**
 * wlan_dp_set_fisa_disallowed_for_intf() - Set fisa disallowed bit for an intf
 * @soc: DP soc handle
 * @dp_intf: DP interface handle
 * @rx_ctx_id: rx context id
 * @val: Enable or disable
 *
 * The function sets the fisa disallowed flag for a given vdev
 *
 * Return: None
 */
static inline
void wlan_dp_set_fisa_disallowed_for_intf(ol_txrx_soc_handle soc,
					  struct wlan_dp_intf *dp_intf,
					  uint8_t rx_ctx_id, uint8_t val)
{
	dp_intf->fisa_disallowed[rx_ctx_id] = val;
}
#else
static inline
void wlan_dp_set_fisa_disallowed_for_intf(ol_txrx_soc_handle soc,
					  struct wlan_dp_intf *dp_intf,
					  uint8_t rx_ctx_id, uint8_t val)
{
}
#endif

#ifdef WLAN_FEATURE_DYNAMIC_RX_AGGREGATION
QDF_STATUS wlan_dp_rx_deliver_to_stack(struct wlan_dp_intf *dp_intf,
				       qdf_nbuf_t nbuf)
{
	struct wlan_dp_psoc_context *dp_ctx = dp_intf->dp_ctx;
	struct wlan_dp_psoc_callbacks *dp_ops = &dp_ctx->dp_ops;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	bool nbuf_receive_offload_ok = false;
	enum dp_nbuf_push_type push_type;
	uint8_t rx_ctx_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
	ol_txrx_soc_handle soc = cds_get_context(QDF_MODULE_ID_SOC);
	int32_t gro_disallowed;

	if (QDF_NBUF_CB_RX_TCP_PROTO(nbuf) &&
	    !QDF_NBUF_CB_RX_PEER_CACHED_FRM(nbuf))
		nbuf_receive_offload_ok = true;

	gro_disallowed = qdf_atomic_read(&dp_intf->gro_disallowed);
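	/*
	 * Keep the per-ring FISA-disallowed state in sync with
	 * gro_disallowed, and clear the flushed marker once GRO is
	 * allowed again.
	 */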
	if (gro_disallowed == 0 &&
	    dp_intf->gro_flushed[rx_ctx_id] != 0) {
		if (qdf_likely(soc))
			wlan_dp_set_fisa_disallowed_for_intf(soc, dp_intf,
							     rx_ctx_id, 0);
		dp_intf->gro_flushed[rx_ctx_id] = 0;
	} else if (gro_disallowed &&
		   dp_intf->gro_flushed[rx_ctx_id] == 0) {
		if (qdf_likely(soc))
			wlan_dp_set_fisa_disallowed_for_intf(soc, dp_intf,
							     rx_ctx_id, 1);
	}

	if (nbuf_receive_offload_ok && dp_ctx->receive_offload_cb &&
	    !dp_ctx->dp_agg_param.gro_force_flush[rx_ctx_id] &&
	    !dp_intf->gro_flushed[rx_ctx_id] &&
	    !dp_intf->runtime_disable_rx_thread) {
		status = dp_ctx->receive_offload_cb(dp_intf, nbuf);

		if (QDF_IS_STATUS_SUCCESS(status)) {
			dp_intf->dp_stats.tx_rx_stats.rx_aggregated++;
			return status;
		}

		if (status == QDF_STATUS_E_GRO_DROP) {
			dp_intf->dp_stats.tx_rx_stats.rx_gro_dropped++;
			return status;
		}
	}

	/*
	 * The below case handles the scenario when rx_aggregation is
	 * re-enabled dynamically, in which case gro_force_flush needs
	 * to be reset to 0 to allow GRO.
	 */
	if (qdf_atomic_read(&dp_ctx->dp_agg_param.rx_aggregation) &&
	    dp_ctx->dp_agg_param.gro_force_flush[rx_ctx_id])
		dp_ctx->dp_agg_param.gro_force_flush[rx_ctx_id] = 0;

	dp_intf->dp_stats.tx_rx_stats.rx_non_aggregated++;

	/* Account for GRO/LRO ineligible packets, mostly UDP */
	if (qdf_nbuf_get_gso_segs(nbuf) == 0)
		dp_ctx->no_rx_offload_pkt_cnt++;

	if (qdf_likely((dp_ctx->enable_dp_rx_threads ||
			dp_ctx->enable_rxthread) &&
		       !dp_intf->runtime_disable_rx_thread)) {
		push_type = DP_NBUF_PUSH_BH_DISABLE;
	} else if (qdf_unlikely(QDF_NBUF_CB_RX_PEER_CACHED_FRM(nbuf))) {
		/*
		 * Use netif_rx_ni() for frames received before the peer
		 * is registered, to avoid contention with the NAPI softirq.
		 * Refer fix:
		 * qcacld-3.0: Do netif_rx_ni() for frames received before
		 * peer assoc
		 */
		push_type = DP_NBUF_PUSH_NI;
	} else { /* NAPI Context */
		push_type = DP_NBUF_PUSH_NAPI;
	}

	return dp_ops->dp_nbuf_push_pkt(nbuf, push_type);
}

#else /* WLAN_FEATURE_DYNAMIC_RX_AGGREGATION */

QDF_STATUS wlan_dp_rx_deliver_to_stack(struct wlan_dp_intf *dp_intf,
				       qdf_nbuf_t nbuf)
{
	struct wlan_dp_psoc_context *dp_ctx = dp_intf->dp_ctx;
	struct wlan_dp_psoc_callbacks *dp_ops = &dp_ctx->dp_ops;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	bool nbuf_receive_offload_ok = false;
	enum dp_nbuf_push_type push_type;

	if (QDF_NBUF_CB_RX_TCP_PROTO(nbuf) &&
	    !QDF_NBUF_CB_RX_PEER_CACHED_FRM(nbuf))
		nbuf_receive_offload_ok = true;

	if (nbuf_receive_offload_ok && dp_ctx->receive_offload_cb) {
		status = dp_ctx->receive_offload_cb(dp_intf, nbuf);

		if (QDF_IS_STATUS_SUCCESS(status)) {
			dp_intf->dp_stats.tx_rx_stats.rx_aggregated++;
			return status;
		}

		if (status == QDF_STATUS_E_GRO_DROP) {
			dp_intf->dp_stats.tx_rx_stats.rx_gro_dropped++;
			return status;
		}
	}

	dp_intf->dp_stats.tx_rx_stats.rx_non_aggregated++;

	/* Account for GRO/LRO ineligible packets, mostly UDP */
	if (qdf_nbuf_get_gso_segs(nbuf) == 0)
		dp_ctx->no_rx_offload_pkt_cnt++;

	if (qdf_likely((dp_ctx->enable_dp_rx_threads ||
			dp_ctx->enable_rxthread) &&
		       !dp_intf->runtime_disable_rx_thread)) {
		push_type = DP_NBUF_PUSH_BH_DISABLE;
	} else if (qdf_unlikely(QDF_NBUF_CB_RX_PEER_CACHED_FRM(nbuf))) {
		/*
		 * Use netif_rx_ni() for frames received before the peer
		 * is registered, to avoid contention with the NAPI softirq.
		 * Refer fix:
		 * qcacld-3.0: Do netif_rx_ni() for frames received before
		 * peer assoc
		 */
		push_type = DP_NBUF_PUSH_NI;
	} else { /* NAPI Context */
		push_type = DP_NBUF_PUSH_NAPI;
	}

	return dp_ops->dp_nbuf_push_pkt(nbuf, push_type);
}
#endif /* WLAN_FEATURE_DYNAMIC_RX_AGGREGATION */
#endif /* CONFIG_HL_SUPPORT */

static inline bool
dp_is_gratuitous_arp_unsolicited_na(struct wlan_dp_psoc_context *dp_ctx,
				    qdf_nbuf_t nbuf)
{
	if (qdf_unlikely(dp_ctx->dp_ops.dp_is_gratuitous_arp_unsolicited_na))
		return dp_ctx->dp_ops.dp_is_gratuitous_arp_unsolicited_na(nbuf);

	return false;
}

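/**
 * dp_rx_flush_packet_cbk() - flush RX packets queued for a link
 * @dp_link_context: DP link context (struct wlan_dp_link)
 * @link_id: vdev id of the link whose packets are to be flushed
 *
 * Flushes FISA aggregates (when FISA is enabled) and, when DP RX
 * threads are enabled, the packets queued in the RX threads for this
 * vdev. The num_active_task counter is held across the flush so that
 * interface teardown can wait for the flush to complete.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if the
 *	SOC or DP context is unavailable
 */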
QDF_STATUS dp_rx_flush_packet_cbk(void *dp_link_context, uint8_t link_id)
{
	struct wlan_dp_link *dp_link = (struct wlan_dp_link *)dp_link_context;
	struct wlan_dp_intf *dp_intf = dp_link->dp_intf;
	struct wlan_dp_psoc_context *dp_ctx;
	ol_txrx_soc_handle soc = cds_get_context(QDF_MODULE_ID_SOC);

	if (qdf_unlikely(!soc))
		return QDF_STATUS_E_FAILURE;

	dp_ctx = dp_intf->dp_ctx;
	if (qdf_unlikely(!dp_ctx))
		return QDF_STATUS_E_FAILURE;

	qdf_atomic_inc(&dp_intf->num_active_task);

	/* Do FISA flush for this vdev */
	if (wlan_dp_cfg_is_rx_fisa_enabled(&dp_ctx->dp_cfg))
		wlan_dp_rx_fisa_flush_by_vdev_id((struct dp_soc *)soc, link_id);

	if (dp_ctx->enable_dp_rx_threads)
		dp_txrx_flush_pkts_by_vdev_id(soc, link_id);

	qdf_atomic_dec(&dp_intf->num_active_task);

	return QDF_STATUS_SUCCESS;
}

#if defined(WLAN_SUPPORT_RX_FISA)
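/**
 * wlan_dp_rx_fisa_cbk() - entry point for FISA RX aggregation
 * @dp_soc: DP SOC handle (unused; the DP context is looked up instead)
 * @dp_vdev: DP vdev handle
 * @nbuf_list: chain of network buffers to run through FISA
 *
 * Return: QDF status from dp_fisa_rx()
 */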
QDF_STATUS wlan_dp_rx_fisa_cbk(void *dp_soc,
			       void *dp_vdev, qdf_nbuf_t nbuf_list)
{
	struct wlan_dp_psoc_context *dp_ctx = dp_get_context();

	return dp_fisa_rx(dp_ctx, dp_vdev, nbuf_list);
}

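/**
 * wlan_dp_rx_fisa_flush_by_ctx_id() - flush FISA aggregates for an RX context
 * @dp_soc: DP SOC handle
 * @ring_num: RX context (ring) number whose aggregates are flushed
 *
 * Return: QDF status from dp_rx_fisa_flush_by_ctx_id()
 */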
QDF_STATUS wlan_dp_rx_fisa_flush_by_ctx_id(void *dp_soc, int ring_num)
{
	return dp_rx_fisa_flush_by_ctx_id((struct dp_soc *)dp_soc, ring_num);
}

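/**
 * wlan_dp_rx_fisa_flush_by_vdev_id() - flush FISA aggregates for a vdev
 * @dp_soc: DP SOC handle
 * @vdev_id: vdev id whose aggregates are flushed
 *
 * Return: QDF status from dp_rx_fisa_flush_by_vdev_id()
 */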
QDF_STATUS wlan_dp_rx_fisa_flush_by_vdev_id(void *dp_soc, uint8_t vdev_id)
{
	return dp_rx_fisa_flush_by_vdev_id((struct dp_soc *)dp_soc, vdev_id);
}
#endif

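/**
 * dp_rx_packet_cbk() - receive packet callback for a DP link
 * @dp_link_context: DP link context (struct wlan_dp_link)
 * @rxBuf: chain of received network buffers
 *
 * Walks the nbuf chain and, per packet: updates protocol stats (ARP,
 * EAPOL, DHCP), runs proxy-ARP and multicast-replay filtering, updates
 * TDLS and NUD counters, optionally holds the RX wake lock, and
 * finally delivers the packet to the network stack (or over netlink
 * for EAPOL frames flagged for it).
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE on NULL input
 */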
QDF_STATUS dp_rx_packet_cbk(void *dp_link_context,
			    qdf_nbuf_t rxBuf)
{
	struct wlan_dp_intf *dp_intf = NULL;
	struct wlan_dp_link *dp_link = NULL;
	struct wlan_dp_psoc_context *dp_ctx = NULL;
	QDF_STATUS qdf_status = QDF_STATUS_E_FAILURE;
	qdf_nbuf_t nbuf = NULL;
	qdf_nbuf_t next = NULL;
	unsigned int cpu_index;
	struct qdf_mac_addr *mac_addr, *dest_mac_addr;
	bool wake_lock = false;
	bool track_arp = false;
	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
	bool is_eapol, send_over_nl;
	bool is_dhcp;
	struct dp_tx_rx_stats *stats;
	QDF_STATUS status;
	uint8_t pkt_type;

	/* Sanity check on inputs */
	if (qdf_unlikely((!dp_link_context) || (!rxBuf))) {
		dp_err_rl("Null params being passed");
		return QDF_STATUS_E_FAILURE;
	}

	dp_link = (struct wlan_dp_link *)dp_link_context;
	dp_intf = dp_link->dp_intf;
	dp_ctx = dp_intf->dp_ctx;

	cpu_index = qdf_get_cpu();
	stats = &dp_intf->dp_stats.tx_rx_stats;

	next = rxBuf;

	while (next) {
		nbuf = next;
		next = qdf_nbuf_next(nbuf);
		qdf_nbuf_set_next(nbuf, NULL);
		is_eapol = false;
		is_dhcp = false;
		send_over_nl = false;

		if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
			if (qdf_nbuf_data_is_arp_rsp(nbuf) &&
			    (dp_intf->track_arp_ip ==
			     qdf_nbuf_get_arp_src_ip(nbuf))) {
				++dp_intf->dp_stats.arp_stats.
					rx_arp_rsp_count;
				dp_debug("ARP packet received");
				track_arp = true;
			}
		} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) {
			subtype = qdf_nbuf_get_eapol_subtype(nbuf);
			send_over_nl = true;

			/* Compare RX packet DA with the dp_intf MAC address */
			dp_rx_pkt_da_check(dp_intf, nbuf);
			if (subtype == QDF_PROTO_EAPOL_M1) {
				++dp_intf->dp_stats.eapol_stats.
						eapol_m1_count;
				is_eapol = true;
			} else if (subtype == QDF_PROTO_EAPOL_M3) {
				++dp_intf->dp_stats.eapol_stats.
						eapol_m3_count;
				is_eapol = true;
			}
		} else if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
			subtype = qdf_nbuf_get_dhcp_subtype(nbuf);
			if (subtype == QDF_PROTO_DHCP_OFFER) {
				++dp_intf->dp_stats.dhcp_stats.
						dhcp_off_count;
				is_dhcp = true;
			} else if (subtype == QDF_PROTO_DHCP_ACK) {
				++dp_intf->dp_stats.dhcp_stats.
						dhcp_ack_count;
				is_dhcp = true;
			}
		}

		wlan_dp_pkt_add_timestamp(dp_intf, QDF_PKT_RX_DRIVER_EXIT,
					  nbuf);

		/* track connectivity stats */
		if (dp_intf->pkt_type_bitmap)
			dp_tx_rx_collect_connectivity_stats_info(nbuf, dp_link,
								 PKT_TYPE_RSP,
								 &pkt_type);

		if ((dp_link->conn_info.proxy_arp_service) &&
		    dp_is_gratuitous_arp_unsolicited_na(dp_ctx, nbuf)) {
			qdf_atomic_inc(&stats->rx_usolict_arp_n_mcast_drp);
			/* Drop gratuitous ARP / unsolicited NA frames when
			 * the proxy ARP service is running; they are counted
			 * above and never delivered to the stack.
			 */
			qdf_nbuf_free(nbuf);
			continue;
		}

		dp_event_eapol_log(nbuf, QDF_RX);
		qdf_dp_trace_log_pkt(dp_link->link_id, nbuf, QDF_RX,
				     QDF_TRACE_DEFAULT_PDEV_ID,
				     dp_intf->device_mode);

		DPTRACE(qdf_dp_trace(nbuf,
				     QDF_DP_TRACE_RX_PACKET_PTR_RECORD,
				     QDF_TRACE_DEFAULT_PDEV_ID,
				     qdf_nbuf_data_addr(nbuf),
				     sizeof(qdf_nbuf_data(nbuf)), QDF_RX));

		DPTRACE(qdf_dp_trace_data_pkt(nbuf, QDF_TRACE_DEFAULT_PDEV_ID,
					      QDF_DP_TRACE_RX_PACKET_RECORD,
					      0, QDF_RX));

		dest_mac_addr = (struct qdf_mac_addr *)(qdf_nbuf_data(nbuf) +
						QDF_NBUF_DEST_MAC_OFFSET);
		mac_addr = (struct qdf_mac_addr *)(qdf_nbuf_data(nbuf) +
						   QDF_NBUF_SRC_MAC_OFFSET);

		status = wlan_objmgr_vdev_try_get_ref(dp_link->vdev,
						      WLAN_TDLS_SB_ID);
		if (QDF_IS_STATUS_SUCCESS(status)) {
			wlan_tdls_update_rx_pkt_cnt(dp_link->vdev, mac_addr,
						    dest_mac_addr);
			wlan_objmgr_vdev_release_ref(dp_link->vdev,
						     WLAN_TDLS_SB_ID);
		}

		if (dp_rx_pkt_tracepoints_enabled())
			qdf_trace_dp_packet(nbuf, QDF_RX, NULL, 0);

		qdf_nbuf_set_dev(nbuf, dp_intf->dev);
		qdf_nbuf_set_protocol_eth_tye_trans(nbuf);
		++stats->per_cpu[cpu_index].rx_packets;
		qdf_net_stats_add_rx_pkts(&dp_intf->stats, 1);
		/* Also count the GSO segments of an aggregated RX frame */
		qdf_net_stats_add_rx_pkts(&dp_intf->stats,
					  qdf_nbuf_get_gso_segs(nbuf));
		qdf_net_stats_add_rx_bytes(&dp_intf->stats,
					   qdf_nbuf_len(nbuf));

		/* Increment GW RX count for NUD tracking (matched on GW MAC) */
		dp_nud_incr_gw_rx_pkt_cnt(dp_intf, mac_addr);

		/* Check & drop replayed mcast packets (for IPv6) */
		if (dp_ctx->dp_cfg.multicast_replay_filter &&
		    qdf_nbuf_is_mcast_replay(nbuf)) {
			qdf_atomic_inc(&stats->rx_usolict_arp_n_mcast_drp);
			qdf_nbuf_free(nbuf);
			continue;
		}

		/* Hold configurable wakelock for unicast traffic */
		if (!dp_is_current_high_throughput(dp_ctx) &&
		    dp_ctx->dp_cfg.rx_wakelock_timeout &&
		    dp_link->conn_info.is_authenticated)
			wake_lock = dp_is_rx_wake_lock_needed(nbuf);

		if (wake_lock) {
			cds_host_diag_log_work(&dp_ctx->rx_wake_lock,
					dp_ctx->dp_cfg.rx_wakelock_timeout,
					WIFI_POWER_EVENT_WAKELOCK_HOLD_RX);
			qdf_wake_lock_timeout_acquire(&dp_ctx->rx_wake_lock,
					dp_ctx->dp_cfg.rx_wakelock_timeout);
		}

		/* Remove SKB from internal tracking table before submitting
		 * it to stack
		 */
		qdf_net_buf_debug_release_skb(nbuf);

		dp_tsf_timestamp_rx(dp_ctx, nbuf);

		if (send_over_nl && dp_ctx->dp_ops.dp_send_rx_pkt_over_nl) {
			if (dp_ctx->dp_ops.dp_send_rx_pkt_over_nl(dp_intf->dev,
					(u8 *)&dp_link->conn_info.peer_macaddr,
								  nbuf, false))
				qdf_status = QDF_STATUS_SUCCESS;
			else
				qdf_status = QDF_STATUS_E_INVAL;
			qdf_nbuf_dev_kfree(nbuf);
		} else {
			qdf_status = wlan_dp_rx_deliver_to_stack(dp_intf, nbuf);
		}

		if (QDF_IS_STATUS_SUCCESS(qdf_status)) {
			++stats->per_cpu[cpu_index].rx_delivered;
			if (track_arp)
				++dp_intf->dp_stats.arp_stats.rx_delivered;
			if (is_eapol)
				++dp_intf->dp_stats.eapol_stats.
				rx_delivered[subtype - QDF_PROTO_EAPOL_M1];
			else if (is_dhcp)
				++dp_intf->dp_stats.dhcp_stats.
				rx_delivered[subtype - QDF_PROTO_DHCP_DISCOVER];

			/* track connectivity stats */
			if (dp_intf->pkt_type_bitmap)
				dp_tx_rx_collect_connectivity_stats_info(
					nbuf, dp_link,
					PKT_TYPE_RX_DELIVERED,
					&pkt_type);
		} else {
			++stats->per_cpu[cpu_index].rx_refused;
			if (track_arp)
				++dp_intf->dp_stats.arp_stats.rx_refused;

			if (is_eapol)
				++dp_intf->dp_stats.eapol_stats.
				       rx_refused[subtype - QDF_PROTO_EAPOL_M1];
			else if (is_dhcp)
				++dp_intf->dp_stats.dhcp_stats.
				  rx_refused[subtype - QDF_PROTO_DHCP_DISCOVER];

			/* track connectivity stats */
			if (dp_intf->pkt_type_bitmap)
				dp_tx_rx_collect_connectivity_stats_info(
					nbuf, dp_link,
					PKT_TYPE_RX_REFUSED,
					&pkt_type);
		}
	}

	return QDF_STATUS_SUCCESS;
}
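
/*
 * Illustrative sketch (an assumption, not part of this file):
 * dp_rx_packet_cbk() and dp_rx_flush_packet_cbk() are typically wired
 * into the datapath when the vdev is registered with CDP, roughly
 * along these lines:
 *
 *	struct ol_txrx_ops txrx_ops = {0};
 *
 *	txrx_ops.rx.rx = dp_rx_packet_cbk;
 *	txrx_ops.rx.rx_flush = dp_rx_flush_packet_cbk;
 *	cdp_vdev_register(soc, vdev_id, (ol_osif_vdev_handle)dp_link,
 *			  &txrx_ops);
 *
 * The exact registration point and ops layout live elsewhere in the
 * DP component; treat this as a sketch of the expected hookup, not as
 * the definitive registration code.
 */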