xref: /wlan-driver/qcacld-3.0/core/dp/txrx/ol_rx.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1*5113495bSYour Name /*
2*5113495bSYour Name  * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved.
3*5113495bSYour Name  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4*5113495bSYour Name  *
5*5113495bSYour Name  * Permission to use, copy, modify, and/or distribute this software for
6*5113495bSYour Name  * any purpose with or without fee is hereby granted, provided that the
7*5113495bSYour Name  * above copyright notice and this permission notice appear in all
8*5113495bSYour Name  * copies.
9*5113495bSYour Name  *
10*5113495bSYour Name  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11*5113495bSYour Name  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12*5113495bSYour Name  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13*5113495bSYour Name  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14*5113495bSYour Name  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15*5113495bSYour Name  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16*5113495bSYour Name  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17*5113495bSYour Name  * PERFORMANCE OF THIS SOFTWARE.
18*5113495bSYour Name  */
19*5113495bSYour Name 
20*5113495bSYour Name #include <qdf_nbuf.h>               /* qdf_nbuf_t, etc. */
21*5113495bSYour Name #include <qdf_util.h>               /* qdf_cpu_to_le64 */
22*5113495bSYour Name #include <qdf_types.h>              /* bool */
23*5113495bSYour Name #include <cds_ieee80211_common.h>   /* ieee80211_frame */
24*5113495bSYour Name 
25*5113495bSYour Name /* external API header files */
26*5113495bSYour Name #include <ol_ctrl_txrx_api.h>   /* ol_rx_notify */
27*5113495bSYour Name #include <ol_txrx_api.h>        /* ol_txrx_pdev_handle */
28*5113495bSYour Name #include <ol_txrx_htt_api.h>    /* ol_rx_indication_handler */
29*5113495bSYour Name #include <ol_htt_rx_api.h>      /* htt_rx_peer_id, etc. */
30*5113495bSYour Name 
31*5113495bSYour Name /* internal API header files */
32*5113495bSYour Name #include <ol_txrx_peer_find.h>  /* ol_txrx_peer_find_by_id */
33*5113495bSYour Name #include <ol_rx_reorder.h>      /* ol_rx_reorder_store, etc. */
34*5113495bSYour Name #include <ol_rx_reorder_timeout.h>      /* OL_RX_REORDER_TIMEOUT_UPDATE */
35*5113495bSYour Name #include <ol_rx_defrag.h>       /* ol_rx_defrag_waitlist_flush */
36*5113495bSYour Name #include <ol_txrx_internal.h>
37*5113495bSYour Name #include <ol_txrx.h>
38*5113495bSYour Name #include <wdi_event.h>
39*5113495bSYour Name #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
40*5113495bSYour Name #include <ol_txrx_encap.h>      /* ol_rx_decap_info_t, etc */
41*5113495bSYour Name #endif
42*5113495bSYour Name #include <ol_rx.h>
43*5113495bSYour Name 
44*5113495bSYour Name /* FIX THIS: txrx should not include private header files of other modules */
45*5113495bSYour Name #include <htt_types.h>
46*5113495bSYour Name #include <ol_if_athvar.h>
47*5113495bSYour Name #include <enet.h>               /* ethernet + SNAP/LLC header defs and
48*5113495bSYour Name 				 * ethertype values
49*5113495bSYour Name 				 */
50*5113495bSYour Name #include <ip_prot.h>            /* IP protocol values */
51*5113495bSYour Name #include <ipv4.h>               /* IPv4 header defs */
52*5113495bSYour Name #include <ipv6_defs.h>          /* IPv6 header defs */
53*5113495bSYour Name #include <ol_vowext_dbg_defs.h>
54*5113495bSYour Name #include <wma.h>
55*5113495bSYour Name #include <wlan_policy_mgr_api.h>
56*5113495bSYour Name #include "pktlog_ac_fmt.h"
57*5113495bSYour Name #include <cdp_txrx_handle.h>
58*5113495bSYour Name #include <pld_common.h>
59*5113495bSYour Name #include <htt_internal.h>
60*5113495bSYour Name #include <wlan_pkt_capture_ucfg_api.h>
61*5113495bSYour Name #include <wlan_cfr_ucfg_api.h>
62*5113495bSYour Name 
63*5113495bSYour Name #ifndef OL_RX_INDICATION_MAX_RECORDS
64*5113495bSYour Name #define OL_RX_INDICATION_MAX_RECORDS 2048
65*5113495bSYour Name #endif
66*5113495bSYour Name 
67*5113495bSYour Name /**
68*5113495bSYour Name  * enum ol_rx_ind_record_type - OL rx indication events
69*5113495bSYour Name  * @OL_RX_INDICATION_POP_START: event recorded before netbuf pop
70*5113495bSYour Name  * @OL_RX_INDICATION_POP_END: event recorded after netbuf pop
71*5113495bSYour Name  * @OL_RX_INDICATION_BUF_REPLENISH: event recorded after buffer replenishment
72*5113495bSYour Name  */
73*5113495bSYour Name enum ol_rx_ind_record_type {
74*5113495bSYour Name 	OL_RX_INDICATION_POP_START,
75*5113495bSYour Name 	OL_RX_INDICATION_POP_END,
76*5113495bSYour Name 	OL_RX_INDICATION_BUF_REPLENISH,
77*5113495bSYour Name };
78*5113495bSYour Name 
79*5113495bSYour Name /**
80*5113495bSYour Name  * struct ol_rx_ind_record - structure for detailing ol txrx rx ind. event
81*5113495bSYour Name  * @value: info corresponding to rx indication event
82*5113495bSYour Name  * @type: what the event was
83*5113495bSYour Name  * @time: when it happened
84*5113495bSYour Name  */
85*5113495bSYour Name struct ol_rx_ind_record {
86*5113495bSYour Name 	uint16_t value;
87*5113495bSYour Name 	enum ol_rx_ind_record_type type;
88*5113495bSYour Name 	uint64_t time;
89*5113495bSYour Name };
90*5113495bSYour Name 
#ifdef OL_RX_INDICATION_RECORD
/* Circular write index into ol_rx_indication_record_history[] below */
static uint32_t ol_rx_ind_record_index;
/* Circular history of recent rx indication events, for post-mortem debug */
struct ol_rx_ind_record
	      ol_rx_indication_record_history[OL_RX_INDICATION_MAX_RECORDS];

/**
 * ol_rx_ind_record_event() - record ol rx indication events
 * @value: contains rx ind. event related info
 * @type: ol rx indication message type
 *
 * This API records the ol rx indication event in a rx indication
 * record buffer.
 *
 * Return: None
 */
static void ol_rx_ind_record_event(uint32_t value,
				    enum ol_rx_ind_record_type type)
{
	/*
	 * NOTE(review): struct ol_rx_ind_record.value is uint16_t, so the
	 * upper 16 bits of @value are silently truncated here — confirm
	 * callers only pass 16-bit quantities (e.g. msdu counts).
	 */
	ol_rx_indication_record_history[ol_rx_ind_record_index].value = value;
	ol_rx_indication_record_history[ol_rx_ind_record_index].type = type;
	ol_rx_indication_record_history[ol_rx_ind_record_index].time =
							qdf_get_log_timestamp();

	/* wrap the index so the buffer behaves as a ring */
	ol_rx_ind_record_index++;
	if (ol_rx_ind_record_index >= OL_RX_INDICATION_MAX_RECORDS)
		ol_rx_ind_record_index = 0;
}
#else
/* Stub: event recording compiled out when OL_RX_INDICATION_RECORD is unset */
static inline
void ol_rx_ind_record_event(uint32_t value, enum ol_rx_ind_record_type type)
{
}

#endif /* OL_RX_INDICATION_RECORD */
125*5113495bSYour Name 
126*5113495bSYour Name void ol_rx_data_process(struct ol_txrx_peer_t *peer,
127*5113495bSYour Name 			qdf_nbuf_t rx_buf_list);
128*5113495bSYour Name 
129*5113495bSYour Name #ifdef WDI_EVENT_ENABLE
130*5113495bSYour Name /**
131*5113495bSYour Name  * ol_rx_send_pktlog_event() - send rx packetlog event
132*5113495bSYour Name  * @pdev: pdev handle
133*5113495bSYour Name  * @peer: peer handle
134*5113495bSYour Name  * @msdu: skb list
135*5113495bSYour Name  * @pktlog_bit: packetlog bit from firmware
136*5113495bSYour Name  *
137*5113495bSYour Name  * Return: none
138*5113495bSYour Name  */
139*5113495bSYour Name #ifdef HELIUMPLUS
void ol_rx_send_pktlog_event(struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_peer_t *peer, qdf_nbuf_t msdu, uint8_t pktlog_bit)
{
	struct ol_rx_remote_data data;

	/* Nothing to log when the firmware did not set the pktlog bit */
	if (!pktlog_bit)
		return;

	/*
	 * pktlog logs rx_desc information, which is overwritten by the
	 * radio header when monitor mode is on — skip the event then.
	 */
	if (cds_get_conparam() == QDF_GLOBAL_MONITOR_MODE)
		return;

	data.msdu = msdu;
	/* Without a peer there is no vdev to read the mac_id from */
	data.mac_id = peer ? peer->vdev->mac_id : 0;

	wdi_event_handler(WDI_EVENT_RX_DESC_REMOTE, pdev->id, &data);
}
162*5113495bSYour Name #else
void ol_rx_send_pktlog_event(struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_peer_t *peer, qdf_nbuf_t msdu, uint8_t pktlog_bit)
{
	struct ol_rx_remote_data data;

	/*
	 * pktlog logs rx_desc information, which is overwritten by the
	 * radio header when monitor mode is on — skip the event then.
	 * (pktlog_bit is not consulted on this, non-HELIUMPLUS, path.)
	 */
	if (cds_get_conparam() == QDF_GLOBAL_MONITOR_MODE)
		return;

	data.msdu = msdu;
	/* Without a peer there is no vdev to read the mac_id from */
	data.mac_id = peer ? peer->vdev->mac_id : 0;

	wdi_event_handler(WDI_EVENT_RX_DESC_REMOTE, pdev->id, &data);
}
185*5113495bSYour Name #endif
186*5113495bSYour Name #endif /* WDI_EVENT_ENABLE */
187*5113495bSYour Name 
188*5113495bSYour Name #ifdef HTT_RX_RESTORE
189*5113495bSYour Name 
ol_rx_restore_handler(struct work_struct * htt_rx)190*5113495bSYour Name static void ol_rx_restore_handler(struct work_struct *htt_rx)
191*5113495bSYour Name {
192*5113495bSYour Name 	qdf_device_t qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
193*5113495bSYour Name 
194*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
195*5113495bSYour Name 		  "Enter: %s", __func__);
196*5113495bSYour Name 	pld_device_self_recovery(qdf_ctx->dev);
197*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
198*5113495bSYour Name 		  "Exit: %s", __func__);
199*5113495bSYour Name }
200*5113495bSYour Name 
201*5113495bSYour Name static DECLARE_WORK(ol_rx_restore_work, ol_rx_restore_handler);
202*5113495bSYour Name 
void ol_rx_trigger_restore(htt_pdev_handle htt_pdev, qdf_nbuf_t head_msdu,
			   qdf_nbuf_t tail_msdu)
{
	qdf_nbuf_t cur = head_msdu;

	/* Drop every msdu in the list; recovery will repopulate the ring */
	while (cur) {
		qdf_nbuf_t nxt = qdf_nbuf_next(cur);

		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "freeing %pK\n", cur);
		qdf_nbuf_free(cur);
		cur = nxt;
	}

	/* Schedule self-recovery only once per restore cycle */
	if (!htt_pdev->rx_ring.htt_rx_restore) {
		cds_set_recovery_in_progress(true);
		htt_pdev->rx_ring.htt_rx_restore = 1;
		schedule_work(&ol_rx_restore_work);
	}
}
222*5113495bSYour Name #endif
223*5113495bSYour Name 
/**
 * ol_rx_update_histogram_stats() - update rx histogram statistics
 * @msdu_count: msdu count
 * @frag_ind: fragment indication set
 * @offload_ind: offload indication set
 *
 * Return: none
 */
void ol_rx_update_histogram_stats(uint32_t msdu_count, uint8_t frag_ind,
		 uint8_t offload_ind)
{
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	ol_txrx_pdev_handle pdev;

	if (qdf_unlikely(!soc))
		return;

	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		return;
	}

	/*
	 * Bucket the indication's msdu burst size into the histogram:
	 * 1, 2-10, 11-20, ..., 51-60, 61+.  A count of 0 is not counted.
	 */
	if (msdu_count > 60) {
		TXRX_STATS_ADD(pdev, pub.rx.rx_ind_histogram.pkts_61_plus, 1);
	} else if (msdu_count > 50) {
		TXRX_STATS_ADD(pdev, pub.rx.rx_ind_histogram.pkts_51_60, 1);
	} else if (msdu_count > 40) {
		TXRX_STATS_ADD(pdev, pub.rx.rx_ind_histogram.pkts_41_50, 1);
	} else if (msdu_count > 30) {
		TXRX_STATS_ADD(pdev, pub.rx.rx_ind_histogram.pkts_31_40, 1);
	} else if (msdu_count > 20) {
		TXRX_STATS_ADD(pdev, pub.rx.rx_ind_histogram.pkts_21_30, 1);
	} else if (msdu_count > 10) {
		TXRX_STATS_ADD(pdev, pub.rx.rx_ind_histogram.pkts_11_20, 1);
	} else if (msdu_count > 1) {
		TXRX_STATS_ADD(pdev, pub.rx.rx_ind_histogram.pkts_2_10, 1);
	} else if (msdu_count == 1) {
		TXRX_STATS_ADD(pdev, pub.rx.rx_ind_histogram.pkts_1, 1);
	}

	/* Per-msdu counters for fragment / offload indications */
	if (frag_ind)
		TXRX_STATS_ADD(pdev, pub.rx.msdus_with_frag_ind, msdu_count);

	if (offload_ind)
		TXRX_STATS_ADD(pdev, pub.rx.msdus_with_offload_ind, msdu_count);

}
272*5113495bSYour Name 
273*5113495bSYour Name #ifdef WLAN_PARTIAL_REORDER_OFFLOAD
274*5113495bSYour Name 
275*5113495bSYour Name #ifdef WDI_EVENT_ENABLE
static void ol_rx_process_inv_peer(ol_txrx_pdev_handle pdev,
				   void *rx_mpdu_desc, qdf_nbuf_t msdu)
{
	uint8_t a1[QDF_MAC_ADDR_SIZE];
	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	struct ol_txrx_vdev_t *vdev = NULL;
	struct ieee80211_frame *wh;
	struct wdi_event_rx_peer_invalid_msg msg;

	wh = (struct ieee80211_frame *)
	     htt_rx_mpdu_wifi_hdr_retrieve(htt_pdev, rx_mpdu_desc);
	/*
	 * Klocwork issue #6152
	 *  All targets that send a "INVALID_PEER" rx status provide a
	 *  802.11 header for each rx MPDU, so it is certain that
	 *  htt_rx_mpdu_wifi_hdr_retrieve will succeed.
	 *  However, both for robustness, e.g. if this function is given a
	 *  MSDU descriptor rather than a MPDU descriptor, and to make it
	 *  clear to static analysis that this code is safe, add an explicit
	 *  check that htt_rx_mpdu_wifi_hdr_retrieve provides a non-NULL value.
	 */
	if (!wh || !IEEE80211_IS_DATA(wh))
		return;

	/* ignore frames for non-existent bssids */
	qdf_mem_copy(a1, wh->i_addr1, QDF_MAC_ADDR_SIZE);
	/*
	 * NOTE(review): qdf_mem_cmp() has memcmp semantics (0 == equal),
	 * so this loop breaks on the first vdev whose MAC does NOT match
	 * a1 — the opposite of the stated intent of finding the vdev that
	 * owns the bssid.  Likely a leftover from an older compare helper
	 * with inverted semantics; confirm before changing behavior.
	 */
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		if (qdf_mem_cmp(a1, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE))
			break;
	}
	/* vdev is NULL here when the list was exhausted without a break */
	if (!vdev)
		return;

	msg.wh = wh;
	msg.msdu = msdu;
	msg.vdev_id = vdev->vdev_id;
	wdi_event_handler(WDI_EVENT_RX_PEER_INVALID, pdev->id,
			  &msg);
}
315*5113495bSYour Name #else
/* Stub: invalid-peer WDI notification compiled out without WDI_EVENT_ENABLE */
static inline
void ol_rx_process_inv_peer(ol_txrx_pdev_handle pdev,
			    void *rx_mpdu_desc, qdf_nbuf_t msdu)
{
}
321*5113495bSYour Name #endif
322*5113495bSYour Name 
323*5113495bSYour Name #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
324*5113495bSYour Name static inline int16_t
ol_rx_rssi_avg(struct ol_txrx_pdev_t * pdev,int16_t rssi_old,int16_t rssi_new)325*5113495bSYour Name ol_rx_rssi_avg(struct ol_txrx_pdev_t *pdev, int16_t rssi_old, int16_t rssi_new)
326*5113495bSYour Name {
327*5113495bSYour Name 	int rssi_old_weight;
328*5113495bSYour Name 
329*5113495bSYour Name 	if (rssi_new == HTT_RSSI_INVALID)
330*5113495bSYour Name 		return rssi_old;
331*5113495bSYour Name 	if (rssi_old == HTT_RSSI_INVALID)
332*5113495bSYour Name 		return rssi_new;
333*5113495bSYour Name 
334*5113495bSYour Name 	rssi_old_weight =
335*5113495bSYour Name 		(1 << pdev->rssi_update_shift) - pdev->rssi_new_weight;
336*5113495bSYour Name 	return (rssi_new * pdev->rssi_new_weight +
337*5113495bSYour Name 		rssi_old * rssi_old_weight) >> pdev->rssi_update_shift;
338*5113495bSYour Name }
339*5113495bSYour Name 
340*5113495bSYour Name static void
ol_rx_ind_rssi_update(struct ol_txrx_peer_t * peer,qdf_nbuf_t rx_ind_msg)341*5113495bSYour Name ol_rx_ind_rssi_update(struct ol_txrx_peer_t *peer, qdf_nbuf_t rx_ind_msg)
342*5113495bSYour Name {
343*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
344*5113495bSYour Name 
345*5113495bSYour Name 	peer->rssi_dbm = ol_rx_rssi_avg(pdev, peer->rssi_dbm,
346*5113495bSYour Name 					htt_rx_ind_rssi_dbm(pdev->htt_pdev,
347*5113495bSYour Name 							    rx_ind_msg));
348*5113495bSYour Name }
349*5113495bSYour Name 
350*5113495bSYour Name static void
ol_rx_mpdu_rssi_update(struct ol_txrx_peer_t * peer,void * rx_mpdu_desc)351*5113495bSYour Name ol_rx_mpdu_rssi_update(struct ol_txrx_peer_t *peer, void *rx_mpdu_desc)
352*5113495bSYour Name {
353*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
354*5113495bSYour Name 
355*5113495bSYour Name 	if (!peer)
356*5113495bSYour Name 		return;
357*5113495bSYour Name 	peer->rssi_dbm = ol_rx_rssi_avg(pdev, peer->rssi_dbm,
358*5113495bSYour Name 					htt_rx_mpdu_desc_rssi_dbm(
359*5113495bSYour Name 						pdev->htt_pdev,
360*5113495bSYour Name 						rx_mpdu_desc));
361*5113495bSYour Name }
362*5113495bSYour Name 
363*5113495bSYour Name #else
364*5113495bSYour Name #define ol_rx_ind_rssi_update(peer, rx_ind_msg) /* no-op */
365*5113495bSYour Name #define ol_rx_mpdu_rssi_update(peer, rx_mpdu_desc)      /* no-op */
366*5113495bSYour Name #endif /* QCA_SUPPORT_PEER_DATA_RX_RSSI */
367*5113495bSYour Name 
/* Free every rx frame from head_msdu through tail_msdu inclusive */
static void discard_msdus(htt_pdev_handle htt_pdev,
			  qdf_nbuf_t head_msdu,
			  qdf_nbuf_t tail_msdu)
{
	qdf_nbuf_t msdu = head_msdu;

	for (;;) {
		qdf_nbuf_t next = qdf_nbuf_next(msdu);

		htt_rx_desc_frame_free(htt_pdev, msdu);
		if (msdu == tail_msdu)
			break;
		msdu = next;
	}
}
387*5113495bSYour Name 
/*
 * Free every rx frame from head_msdu through tail_msdu inclusive.
 *
 * NOTE(review): despite the name, this does exactly what discard_msdus()
 * does — it frees the frames rather than chaining them.  Confirm whether
 * chaining logic was intended here, or fold the two helpers together.
 */
static void chain_msdus(htt_pdev_handle htt_pdev,
			qdf_nbuf_t head_msdu,
			qdf_nbuf_t tail_msdu)
{
	qdf_nbuf_t msdu = head_msdu;

	for (;;) {
		qdf_nbuf_t next = qdf_nbuf_next(msdu);

		htt_rx_desc_frame_free(htt_pdev, msdu);
		if (msdu == tail_msdu)
			break;
		msdu = next;
	}
}
404*5113495bSYour Name 
/**
 * process_reorder() - validate one rx MPDU and deposit it into the peer's
 * rx reorder array, or discard it on a sequence-number check failure
 * @pdev: txrx pdev
 * @rx_mpdu_desc: HTT rx MPDU descriptor for this MPDU
 * @tid: traffic ID of the MPDU
 * @peer: peer the MPDU belongs to
 * @head_msdu: first MSDU of the MPDU's linked list
 * @tail_msdu: last MSDU of the MPDU's linked list
 * @num_mpdu_ranges: number of MPDU ranges in the rx indication
 * @num_mpdus: number of MPDUs in the current range
 * @rx_ind_release: whether the rx indication carries a release flag
 *
 * NOTE(review): @rx_ind_release is passed by value, so the assignment
 * below ("rx_ind_release = false") has no effect on the caller — confirm
 * whether it was meant to be a pointer/out parameter.
 */
static void process_reorder(ol_txrx_pdev_handle pdev,
			    void *rx_mpdu_desc,
			    uint8_t tid,
			    struct ol_txrx_peer_t *peer,
			    qdf_nbuf_t head_msdu,
			    qdf_nbuf_t tail_msdu,
			    int num_mpdu_ranges,
			    int num_mpdus,
			    bool rx_ind_release)
{
	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	enum htt_rx_status mpdu_status;
	int reorder_idx;

	reorder_idx = htt_rx_mpdu_desc_reorder_idx(htt_pdev, rx_mpdu_desc,
						   true);
	OL_RX_REORDER_TRACE_ADD(pdev, tid,
				reorder_idx,
				htt_rx_mpdu_desc_seq_num(htt_pdev,
							 rx_mpdu_desc, false),
				1);
	ol_rx_mpdu_rssi_update(peer, rx_mpdu_desc);
	/*
	 * In most cases, out-of-bounds and duplicate sequence number detection
	 * is performed by the target, but in some cases it is done by the host.
	 * Specifically, the host does rx out-of-bounds sequence number
	 * detection for:
	 * 1.  Peregrine or Rome target
	 *     for peer-TIDs that do not have aggregation enabled, if the
	 *     RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK flag
	 *     is set during the driver build.
	 * 2.  Riva-family targets, which have rx reorder timeouts handled by
	 *     the host rather than the target.
	 *     (The target already does duplicate detection, but the host
	 *     may have given up waiting for a particular sequence number before
	 *     it arrives.  In this case, the out-of-bounds sequence number
	 *     of the late frame allows the host to discard it, rather than
	 *     sending it out of order.
	 */
	mpdu_status = OL_RX_SEQ_NUM_CHECK(pdev,
						  peer,
						  tid,
						  rx_mpdu_desc);
	if (mpdu_status != htt_rx_status_ok) {
		/*
		 * If the sequence number was out of bounds, the MPDU needs
		 * to be discarded.
		 */
		discard_msdus(htt_pdev, head_msdu, tail_msdu);
		/*
		 * For Peregrine and Rome,
		 * OL_RX_REORDER_SEQ_NUM_CHECK should only fail for the case
		 * of (duplicate) non-aggregates.
		 *
		 * For Riva, Pronto and Northstar,
		 * there should be only one MPDU delivered at a time.
		 * Thus, there are no further MPDUs that need to be
		 * processed here.
		 * Just to be sure this is true, check the assumption
		 * that this was the only MPDU referenced by the rx
		 * indication.
		 */
		TXRX_ASSERT2((num_mpdu_ranges == 1) && num_mpdus == 1);

		/*
		 * The MPDU was not stored in the rx reorder array, so
		 * there's nothing to release.
		 * (See NOTE(review) in the header: this write is local only.)
		 */
		rx_ind_release = false;
	} else {
		ol_rx_reorder_store(pdev, peer, tid,
				    reorder_idx, head_msdu, tail_msdu);
		/* win_sz_mask == 0: no aggregation — track last seq for
		 * duplicate detection of non-aggregate frames */
		if (peer->tids_rx_reorder[tid].win_sz_mask == 0) {
			peer->tids_last_seq[tid] = htt_rx_mpdu_desc_seq_num(
				htt_pdev,
				rx_mpdu_desc, false);
		}
	}
} /* process_reorder */
484*5113495bSYour Name 
485*5113495bSYour Name #ifdef WLAN_FEATURE_DSRC
486*5113495bSYour Name static void
ol_rx_ocb_update_peer(ol_txrx_pdev_handle pdev,qdf_nbuf_t rx_ind_msg,struct ol_txrx_peer_t * peer)487*5113495bSYour Name ol_rx_ocb_update_peer(ol_txrx_pdev_handle pdev, qdf_nbuf_t rx_ind_msg,
488*5113495bSYour Name 		      struct ol_txrx_peer_t *peer)
489*5113495bSYour Name {
490*5113495bSYour Name 	int i;
491*5113495bSYour Name 
492*5113495bSYour Name 	htt_rx_ind_legacy_rate(pdev->htt_pdev, rx_ind_msg,
493*5113495bSYour Name 			       &peer->last_pkt_legacy_rate,
494*5113495bSYour Name 			       &peer->last_pkt_legacy_rate_sel);
495*5113495bSYour Name 	peer->last_pkt_rssi_cmb = htt_rx_ind_rssi_dbm(
496*5113495bSYour Name 				pdev->htt_pdev, rx_ind_msg);
497*5113495bSYour Name 	for (i = 0; i < 4; i++)
498*5113495bSYour Name 		peer->last_pkt_rssi[i] =
499*5113495bSYour Name 		    htt_rx_ind_rssi_dbm_chain(pdev->htt_pdev, rx_ind_msg, i);
500*5113495bSYour Name 
501*5113495bSYour Name 	htt_rx_ind_timestamp(pdev->htt_pdev, rx_ind_msg,
502*5113495bSYour Name 			     &peer->last_pkt_timestamp_microsec,
503*5113495bSYour Name 			     &peer->last_pkt_timestamp_submicrosec);
504*5113495bSYour Name 	peer->last_pkt_tsf = htt_rx_ind_tsf32(pdev->htt_pdev, rx_ind_msg);
505*5113495bSYour Name 	peer->last_pkt_tid = htt_rx_ind_ext_tid(pdev->htt_pdev, rx_ind_msg);
506*5113495bSYour Name }
507*5113495bSYour Name #else
/* Stub: OCB per-packet stats compiled out without WLAN_FEATURE_DSRC */
static void
ol_rx_ocb_update_peer(ol_txrx_pdev_handle pdev, qdf_nbuf_t rx_ind_msg,
		      struct ol_txrx_peer_t *peer)
{
}
513*5113495bSYour Name #endif
514*5113495bSYour Name 
515*5113495bSYour Name void
ol_rx_indication_handler(ol_txrx_pdev_handle pdev,qdf_nbuf_t rx_ind_msg,uint16_t peer_id,uint8_t tid,int num_mpdu_ranges)516*5113495bSYour Name ol_rx_indication_handler(ol_txrx_pdev_handle pdev,
517*5113495bSYour Name 			 qdf_nbuf_t rx_ind_msg,
518*5113495bSYour Name 			 uint16_t peer_id, uint8_t tid, int num_mpdu_ranges)
519*5113495bSYour Name {
520*5113495bSYour Name 	int mpdu_range;
521*5113495bSYour Name 	unsigned int seq_num_start = 0, seq_num_end = 0;
522*5113495bSYour Name 	bool rx_ind_release = false;
523*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev = NULL;
524*5113495bSYour Name 	struct ol_txrx_peer_t *peer;
525*5113495bSYour Name 	htt_pdev_handle htt_pdev;
526*5113495bSYour Name 	uint16_t center_freq;
527*5113495bSYour Name 	uint16_t chan1;
528*5113495bSYour Name 	uint16_t chan2;
529*5113495bSYour Name 	uint8_t phymode;
530*5113495bSYour Name 	bool ret;
531*5113495bSYour Name 	uint32_t msdu_count = 0;
532*5113495bSYour Name 
533*5113495bSYour Name 	htt_pdev = pdev->htt_pdev;
534*5113495bSYour Name 	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
535*5113495bSYour Name 	if (!peer) {
536*5113495bSYour Name 		/*
537*5113495bSYour Name 		 * If we can't find a peer send this packet to OCB interface
538*5113495bSYour Name 		 * using OCB self peer
539*5113495bSYour Name 		 */
540*5113495bSYour Name 		if (!ol_txrx_get_ocb_peer(pdev, &peer))
541*5113495bSYour Name 			peer = NULL;
542*5113495bSYour Name 	}
543*5113495bSYour Name 
544*5113495bSYour Name 	if (peer) {
545*5113495bSYour Name 		vdev = peer->vdev;
546*5113495bSYour Name 		ol_rx_ind_rssi_update(peer, rx_ind_msg);
547*5113495bSYour Name 
548*5113495bSYour Name 		if (vdev->opmode == wlan_op_mode_ocb)
549*5113495bSYour Name 			ol_rx_ocb_update_peer(pdev, rx_ind_msg, peer);
550*5113495bSYour Name 	}
551*5113495bSYour Name 
552*5113495bSYour Name 	TXRX_STATS_INCR(pdev, priv.rx.normal.ppdus);
553*5113495bSYour Name 
554*5113495bSYour Name 	OL_RX_REORDER_TIMEOUT_MUTEX_LOCK(pdev);
555*5113495bSYour Name 
556*5113495bSYour Name 	if (htt_rx_ind_flush(pdev->htt_pdev, rx_ind_msg) && peer) {
557*5113495bSYour Name 		htt_rx_ind_flush_seq_num_range(pdev->htt_pdev, rx_ind_msg,
558*5113495bSYour Name 					       &seq_num_start, &seq_num_end);
559*5113495bSYour Name 		if (tid == HTT_INVALID_TID) {
560*5113495bSYour Name 			/*
561*5113495bSYour Name 			 * host/FW reorder state went out-of sync
562*5113495bSYour Name 			 * for a while because FW ran out of Rx indication
563*5113495bSYour Name 			 * buffer. We have to discard all the buffers in
564*5113495bSYour Name 			 * reorder queue.
565*5113495bSYour Name 			 */
566*5113495bSYour Name 			ol_rx_reorder_peer_cleanup(vdev, peer);
567*5113495bSYour Name 		} else {
568*5113495bSYour Name 			if (tid >= OL_TXRX_NUM_EXT_TIDS) {
569*5113495bSYour Name 				ol_txrx_err("invalid tid, %u", tid);
570*5113495bSYour Name 				WARN_ON(1);
571*5113495bSYour Name 				return;
572*5113495bSYour Name 			}
573*5113495bSYour Name 			ol_rx_reorder_flush(vdev, peer, tid, seq_num_start,
574*5113495bSYour Name 					    seq_num_end, htt_rx_flush_release);
575*5113495bSYour Name 		}
576*5113495bSYour Name 	}
577*5113495bSYour Name 
578*5113495bSYour Name 	if (htt_rx_ind_release(pdev->htt_pdev, rx_ind_msg)) {
579*5113495bSYour Name 		/*
580*5113495bSYour Name 		 * The ind info of release is saved here and do release at the
581*5113495bSYour Name 		 * end. This is for the reason of in HL case, the qdf_nbuf_t
582*5113495bSYour Name 		 * for msg and payload are the same buf. And the buf will be
583*5113495bSYour Name 		 * changed during processing
584*5113495bSYour Name 		 */
585*5113495bSYour Name 		rx_ind_release = true;
586*5113495bSYour Name 		htt_rx_ind_release_seq_num_range(pdev->htt_pdev, rx_ind_msg,
587*5113495bSYour Name 						 &seq_num_start, &seq_num_end);
588*5113495bSYour Name 	}
589*5113495bSYour Name #ifdef DEBUG_DMA_DONE
590*5113495bSYour Name 	pdev->htt_pdev->rx_ring.dbg_initial_msdu_payld =
591*5113495bSYour Name 		pdev->htt_pdev->rx_ring.sw_rd_idx.msdu_payld;
592*5113495bSYour Name #endif
593*5113495bSYour Name 
594*5113495bSYour Name 	for (mpdu_range = 0; mpdu_range < num_mpdu_ranges; mpdu_range++) {
595*5113495bSYour Name 		enum htt_rx_status status;
596*5113495bSYour Name 		int i, num_mpdus;
597*5113495bSYour Name 		qdf_nbuf_t head_msdu, tail_msdu, msdu;
598*5113495bSYour Name 		void *rx_mpdu_desc;
599*5113495bSYour Name 
600*5113495bSYour Name #ifdef DEBUG_DMA_DONE
601*5113495bSYour Name 		pdev->htt_pdev->rx_ring.dbg_mpdu_range = mpdu_range;
602*5113495bSYour Name #endif
603*5113495bSYour Name 
604*5113495bSYour Name 		htt_rx_ind_mpdu_range_info(pdev->htt_pdev, rx_ind_msg,
605*5113495bSYour Name 					   mpdu_range, &status, &num_mpdus);
606*5113495bSYour Name 		if ((status == htt_rx_status_ok) && peer) {
607*5113495bSYour Name 			TXRX_STATS_ADD(pdev, priv.rx.normal.mpdus, num_mpdus);
608*5113495bSYour Name 			/* valid frame - deposit it into rx reordering buffer */
609*5113495bSYour Name 			for (i = 0; i < num_mpdus; i++) {
610*5113495bSYour Name 				int msdu_chaining;
611*5113495bSYour Name 				/*
612*5113495bSYour Name 				 * Get a linked list of the MSDUs that comprise
613*5113495bSYour Name 				 * this MPDU.
614*5113495bSYour Name 				 * This also attaches each rx MSDU descriptor to
615*5113495bSYour Name 				 * the corresponding rx MSDU network buffer.
616*5113495bSYour Name 				 * (In some systems, the rx MSDU desc is already
617*5113495bSYour Name 				 * in the same buffer as the MSDU payload; in
618*5113495bSYour Name 				 * other systems they are separate, so a pointer
619*5113495bSYour Name 				 * needs to be set in the netbuf to locate the
620*5113495bSYour Name 				 * corresponding rx descriptor.)
621*5113495bSYour Name 				 *
622*5113495bSYour Name 				 * It is necessary to call htt_rx_amsdu_pop
623*5113495bSYour Name 				 * before htt_rx_mpdu_desc_list_next, because
624*5113495bSYour Name 				 * the (MPDU) rx descriptor has DMA unmapping
625*5113495bSYour Name 				 * done during the htt_rx_amsdu_pop call.
626*5113495bSYour Name 				 * The rx desc should not be accessed until this
627*5113495bSYour Name 				 * DMA unmapping has been done, since the DMA
628*5113495bSYour Name 				 * unmapping involves making sure the cache area
629*5113495bSYour Name 				 * for the mapped buffer is flushed, so the data
630*5113495bSYour Name 				 * written by the MAC DMA into memory will be
631*5113495bSYour Name 				 * fetched, rather than garbage from the cache.
632*5113495bSYour Name 				 */
633*5113495bSYour Name 
634*5113495bSYour Name #ifdef DEBUG_DMA_DONE
635*5113495bSYour Name 				pdev->htt_pdev->rx_ring.dbg_mpdu_count = i;
636*5113495bSYour Name #endif
637*5113495bSYour Name 
638*5113495bSYour Name 				msdu_chaining =
639*5113495bSYour Name 					htt_rx_amsdu_pop(htt_pdev,
640*5113495bSYour Name 							 rx_ind_msg,
641*5113495bSYour Name 							 &head_msdu,
642*5113495bSYour Name 							 &tail_msdu,
643*5113495bSYour Name 							 &msdu_count);
644*5113495bSYour Name #ifdef HTT_RX_RESTORE
645*5113495bSYour Name 				if (htt_pdev->rx_ring.rx_reset) {
646*5113495bSYour Name 					ol_rx_trigger_restore(htt_pdev,
647*5113495bSYour Name 							      head_msdu,
648*5113495bSYour Name 							      tail_msdu);
649*5113495bSYour Name 					OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(
650*5113495bSYour Name 									pdev);
651*5113495bSYour Name 					return;
652*5113495bSYour Name 				}
653*5113495bSYour Name #endif
654*5113495bSYour Name 				rx_mpdu_desc =
655*5113495bSYour Name 					htt_rx_mpdu_desc_list_next(htt_pdev,
656*5113495bSYour Name 								   rx_ind_msg);
657*5113495bSYour Name 				ret = htt_rx_msdu_center_freq(htt_pdev, peer,
658*5113495bSYour Name 					rx_mpdu_desc, &center_freq, &chan1,
659*5113495bSYour Name 					&chan2, &phymode);
660*5113495bSYour Name 				if (ret == true) {
661*5113495bSYour Name 					peer->last_pkt_center_freq =
662*5113495bSYour Name 						center_freq;
663*5113495bSYour Name 				} else {
664*5113495bSYour Name 					peer->last_pkt_center_freq = 0;
665*5113495bSYour Name 				}
666*5113495bSYour Name 
667*5113495bSYour Name 				/* Pktlog */
668*5113495bSYour Name 				ol_rx_send_pktlog_event(pdev, peer,
669*5113495bSYour Name 							head_msdu, 1);
670*5113495bSYour Name 
671*5113495bSYour Name 				if (msdu_chaining) {
672*5113495bSYour Name 					/*
673*5113495bSYour Name 					 * TBDXXX - to deliver SDU with
674*5113495bSYour Name 					 * chaining, we need to stitch those
675*5113495bSYour Name 					 * scattered buffers into one single
676*5113495bSYour Name 					 * buffer.
677*5113495bSYour Name 					 * Just discard it now.
678*5113495bSYour Name 					 */
679*5113495bSYour Name 					chain_msdus(htt_pdev,
680*5113495bSYour Name 						    head_msdu,
681*5113495bSYour Name 						    tail_msdu);
682*5113495bSYour Name 				} else {
683*5113495bSYour Name 					process_reorder(pdev, rx_mpdu_desc,
684*5113495bSYour Name 							tid, peer,
685*5113495bSYour Name 							head_msdu, tail_msdu,
686*5113495bSYour Name 							num_mpdu_ranges,
687*5113495bSYour Name 							num_mpdus,
688*5113495bSYour Name 							rx_ind_release);
689*5113495bSYour Name 				}
690*5113495bSYour Name 
691*5113495bSYour Name 			}
692*5113495bSYour Name 		} else {
693*5113495bSYour Name 			/* invalid frames - discard them */
694*5113495bSYour Name 			OL_RX_REORDER_TRACE_ADD(pdev, tid,
695*5113495bSYour Name 						TXRX_SEQ_NUM_ERR(status),
696*5113495bSYour Name 						TXRX_SEQ_NUM_ERR(status),
697*5113495bSYour Name 						num_mpdus);
698*5113495bSYour Name 			TXRX_STATS_ADD(pdev, priv.rx.err.mpdu_bad, num_mpdus);
699*5113495bSYour Name 			for (i = 0; i < num_mpdus; i++) {
700*5113495bSYour Name 				/* pull the MPDU's MSDUs off the buffer queue */
701*5113495bSYour Name 				htt_rx_amsdu_pop(htt_pdev, rx_ind_msg, &msdu,
702*5113495bSYour Name 						 &tail_msdu, &msdu_count);
703*5113495bSYour Name #ifdef HTT_RX_RESTORE
704*5113495bSYour Name 				if (htt_pdev->rx_ring.rx_reset) {
705*5113495bSYour Name 					ol_rx_trigger_restore(htt_pdev, msdu,
706*5113495bSYour Name 							      tail_msdu);
707*5113495bSYour Name 					OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(
708*5113495bSYour Name 									pdev);
709*5113495bSYour Name 					return;
710*5113495bSYour Name 				}
711*5113495bSYour Name #endif
712*5113495bSYour Name 				/* pull the MPDU desc off the desc queue */
713*5113495bSYour Name 				rx_mpdu_desc =
714*5113495bSYour Name 					htt_rx_mpdu_desc_list_next(htt_pdev,
715*5113495bSYour Name 								   rx_ind_msg);
716*5113495bSYour Name 				OL_RX_ERR_STATISTICS_2(pdev, vdev, peer,
717*5113495bSYour Name 						       rx_mpdu_desc, msdu,
718*5113495bSYour Name 						       status);
719*5113495bSYour Name 
720*5113495bSYour Name 				if (status == htt_rx_status_tkip_mic_err &&
721*5113495bSYour Name 				    vdev && peer) {
722*5113495bSYour Name 					union htt_rx_pn_t pn;
723*5113495bSYour Name 					uint8_t key_id;
724*5113495bSYour Name 
725*5113495bSYour Name 					htt_rx_mpdu_desc_pn(
726*5113495bSYour Name 						pdev->htt_pdev,
727*5113495bSYour Name 						htt_rx_msdu_desc_retrieve(
728*5113495bSYour Name 							pdev->htt_pdev,
729*5113495bSYour Name 							msdu), &pn, 48);
730*5113495bSYour Name 					if (htt_rx_msdu_desc_key_id(
731*5113495bSYour Name 						    pdev->htt_pdev,
732*5113495bSYour Name 						    htt_rx_msdu_desc_retrieve(
733*5113495bSYour Name 							    pdev->htt_pdev,
734*5113495bSYour Name 							    msdu),
735*5113495bSYour Name 						    &key_id) == true) {
736*5113495bSYour Name 						ol_rx_send_mic_err_ind(
737*5113495bSYour Name 							vdev->pdev,
738*5113495bSYour Name 							vdev->vdev_id,
739*5113495bSYour Name 							peer->mac_addr.raw,
740*5113495bSYour Name 							tid, 0,
741*5113495bSYour Name 							OL_RX_ERR_TKIP_MIC,
742*5113495bSYour Name 							msdu, &pn.pn48,
743*5113495bSYour Name 							key_id);
744*5113495bSYour Name 					}
745*5113495bSYour Name 				}
746*5113495bSYour Name 
747*5113495bSYour Name 				if (status != htt_rx_status_ctrl_mgmt_null) {
748*5113495bSYour Name 					/* Pktlog */
749*5113495bSYour Name 					ol_rx_send_pktlog_event(pdev,
750*5113495bSYour Name 						 peer, msdu, 1);
751*5113495bSYour Name 				}
752*5113495bSYour Name 
753*5113495bSYour Name 				if (status == htt_rx_status_err_inv_peer) {
754*5113495bSYour Name 					/* once per mpdu */
755*5113495bSYour Name 					ol_rx_process_inv_peer(pdev,
756*5113495bSYour Name 							       rx_mpdu_desc,
757*5113495bSYour Name 							       msdu);
758*5113495bSYour Name 				}
759*5113495bSYour Name 
760*5113495bSYour Name 				while (1) {
761*5113495bSYour Name 					/* Free the nbuf */
762*5113495bSYour Name 					qdf_nbuf_t next;
763*5113495bSYour Name 
764*5113495bSYour Name 					next = qdf_nbuf_next(msdu);
765*5113495bSYour Name 					htt_rx_desc_frame_free(htt_pdev, msdu);
766*5113495bSYour Name 					if (msdu == tail_msdu)
767*5113495bSYour Name 						break;
768*5113495bSYour Name 					msdu = next;
769*5113495bSYour Name 				}
770*5113495bSYour Name 			}
771*5113495bSYour Name 		}
772*5113495bSYour Name 	}
773*5113495bSYour Name 	/*
774*5113495bSYour Name 	 * Now that a whole batch of MSDUs have been pulled out of HTT
775*5113495bSYour Name 	 * and put into the rx reorder array, it is an appropriate time
776*5113495bSYour Name 	 * to request HTT to provide new rx MSDU buffers for the target
777*5113495bSYour Name 	 * to fill.
778*5113495bSYour Name 	 * This could be done after the end of this function, but it's
779*5113495bSYour Name 	 * better to do it now, rather than waiting until after the driver
780*5113495bSYour Name 	 * and OS finish processing the batch of rx MSDUs.
781*5113495bSYour Name 	 */
782*5113495bSYour Name 	htt_rx_msdu_buff_replenish(htt_pdev);
783*5113495bSYour Name 
784*5113495bSYour Name 	if ((true == rx_ind_release) && peer && vdev) {
785*5113495bSYour Name 		ol_rx_reorder_release(vdev, peer, tid, seq_num_start,
786*5113495bSYour Name 				      seq_num_end);
787*5113495bSYour Name 	}
788*5113495bSYour Name 	OL_RX_REORDER_TIMEOUT_UPDATE(peer, tid);
789*5113495bSYour Name 	OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
790*5113495bSYour Name 
791*5113495bSYour Name 	if (pdev->rx.flags.defrag_timeout_check)
792*5113495bSYour Name 		ol_rx_defrag_waitlist_flush(pdev);
793*5113495bSYour Name }
794*5113495bSYour Name #endif
795*5113495bSYour Name 
796*5113495bSYour Name void
ol_rx_sec_ind_handler(ol_txrx_pdev_handle pdev,uint16_t peer_id,enum htt_sec_type sec_type,int is_unicast,uint32_t * michael_key,uint32_t * rx_pn)797*5113495bSYour Name ol_rx_sec_ind_handler(ol_txrx_pdev_handle pdev,
798*5113495bSYour Name 		      uint16_t peer_id,
799*5113495bSYour Name 		      enum htt_sec_type sec_type,
800*5113495bSYour Name 		      int is_unicast, uint32_t *michael_key, uint32_t *rx_pn)
801*5113495bSYour Name {
802*5113495bSYour Name 	struct ol_txrx_peer_t *peer;
803*5113495bSYour Name 	int sec_index, i;
804*5113495bSYour Name 
805*5113495bSYour Name 	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
806*5113495bSYour Name 	if (!peer) {
807*5113495bSYour Name 		ol_txrx_err(
808*5113495bSYour Name 			"Couldn't find peer from ID %d - skipping security inits\n",
809*5113495bSYour Name 			peer_id);
810*5113495bSYour Name 		return;
811*5113495bSYour Name 	}
812*5113495bSYour Name 	ol_txrx_dbg(
813*5113495bSYour Name 		"sec spec for peer %pK ("QDF_MAC_ADDR_FMT"): %s key of type %d\n",
814*5113495bSYour Name 		peer,
815*5113495bSYour Name 		QDF_MAC_ADDR_REF(peer->mac_addr.raw),
816*5113495bSYour Name 		is_unicast ? "ucast" : "mcast", sec_type);
817*5113495bSYour Name 	sec_index = is_unicast ? txrx_sec_ucast : txrx_sec_mcast;
818*5113495bSYour Name 	peer->security[sec_index].sec_type = sec_type;
819*5113495bSYour Name 	/*
820*5113495bSYour Name 	 * michael key only valid for TKIP
821*5113495bSYour Name 	 * but for simplicity, copy it anyway
822*5113495bSYour Name 	 */
823*5113495bSYour Name 	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
824*5113495bSYour Name 		     michael_key,
825*5113495bSYour Name 		     sizeof(peer->security[sec_index].michael_key));
826*5113495bSYour Name 
827*5113495bSYour Name 	if (sec_type != htt_sec_type_wapi) {
828*5113495bSYour Name 		qdf_mem_zero(peer->tids_last_pn_valid,
829*5113495bSYour Name 			    OL_TXRX_NUM_EXT_TIDS);
830*5113495bSYour Name 	} else if (sec_index == txrx_sec_mcast || peer->tids_last_pn_valid[0]) {
831*5113495bSYour Name 		for (i = 0; i < OL_TXRX_NUM_EXT_TIDS; i++) {
832*5113495bSYour Name 			/*
833*5113495bSYour Name 			 * Setting PN valid bit for WAPI sec_type,
834*5113495bSYour Name 			 * since WAPI PN has to be started with predefined value
835*5113495bSYour Name 			 */
836*5113495bSYour Name 			peer->tids_last_pn_valid[i] = 1;
837*5113495bSYour Name 			qdf_mem_copy((uint8_t *) &peer->tids_last_pn[i],
838*5113495bSYour Name 				     (uint8_t *) rx_pn,
839*5113495bSYour Name 				     sizeof(union htt_rx_pn_t));
840*5113495bSYour Name 			peer->tids_last_pn[i].pn128[1] =
841*5113495bSYour Name 				qdf_cpu_to_le64(
842*5113495bSYour Name 					peer->tids_last_pn[i].pn128[1]);
843*5113495bSYour Name 			peer->tids_last_pn[i].pn128[0] =
844*5113495bSYour Name 				qdf_cpu_to_le64(
845*5113495bSYour Name 					peer->tids_last_pn[i].pn128[0]);
846*5113495bSYour Name 			if (sec_index == txrx_sec_ucast)
847*5113495bSYour Name 				peer->tids_rekey_flag[i] = 1;
848*5113495bSYour Name 		}
849*5113495bSYour Name 	}
850*5113495bSYour Name }
851*5113495bSYour Name 
/*
 * ol_rx_notify() - hook invoked when the rx data path sees an inspected
 * frame (currently IGMP, via ol_rx_inspect) that the control path may
 * want to learn from.  Intentionally a no-op in this driver; see the
 * note in the body.
 */
void ol_rx_notify(struct cdp_cfg *cfg_pdev,
		  uint8_t vdev_id,
		  uint8_t *peer_mac_addr,
		  int tid,
		  uint32_t tsf32,
		  enum ol_rx_notify_type notify_type, qdf_nbuf_t rx_frame)
{
	/*
	 * NOTE: This is used in qca_main for AP mode to handle IGMP
	 * packets specially. Umac has a corresponding handler for this
	 * not sure if we need to have this for CLD as well.
	 */
}
865*5113495bSYour Name 
866*5113495bSYour Name #ifdef WLAN_PARTIAL_REORDER_OFFLOAD
867*5113495bSYour Name /**
868*5113495bSYour Name  * @brief Look into a rx MSDU to see what kind of special handling it requires
869*5113495bSYour Name  * @details
870*5113495bSYour Name  *      This function is called when the host rx SW sees that the target
871*5113495bSYour Name  *      rx FW has marked a rx MSDU as needing inspection.
872*5113495bSYour Name  *      Based on the results of the inspection, the host rx SW will infer
873*5113495bSYour Name  *      what special handling to perform on the rx frame.
874*5113495bSYour Name  *      Currently, the only type of frames that require special handling
875*5113495bSYour Name  *      are IGMP frames.  The rx data-path SW checks if the frame is IGMP
876*5113495bSYour Name  *      (it should be, since the target would not have set the inspect flag
877*5113495bSYour Name  *      otherwise), and then calls the ol_rx_notify function so the
878*5113495bSYour Name  *      control-path SW can perform multicast group membership learning
879*5113495bSYour Name  *      by sniffing the IGMP frame.
880*5113495bSYour Name  */
881*5113495bSYour Name #define SIZEOF_80211_HDR (sizeof(struct ieee80211_frame))
882*5113495bSYour Name static void
ol_rx_inspect(struct ol_txrx_vdev_t * vdev,struct ol_txrx_peer_t * peer,unsigned int tid,qdf_nbuf_t msdu,void * rx_desc)883*5113495bSYour Name ol_rx_inspect(struct ol_txrx_vdev_t *vdev,
884*5113495bSYour Name 	      struct ol_txrx_peer_t *peer,
885*5113495bSYour Name 	      unsigned int tid, qdf_nbuf_t msdu, void *rx_desc)
886*5113495bSYour Name {
887*5113495bSYour Name 	ol_txrx_pdev_handle pdev = vdev->pdev;
888*5113495bSYour Name 	uint8_t *data, *l3_hdr;
889*5113495bSYour Name 	uint16_t ethertype;
890*5113495bSYour Name 	int offset;
891*5113495bSYour Name 
892*5113495bSYour Name 	data = qdf_nbuf_data(msdu);
893*5113495bSYour Name 	if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
894*5113495bSYour Name 		offset = SIZEOF_80211_HDR + LLC_SNAP_HDR_OFFSET_ETHERTYPE;
895*5113495bSYour Name 		l3_hdr = data + SIZEOF_80211_HDR + LLC_SNAP_HDR_LEN;
896*5113495bSYour Name 	} else {
897*5113495bSYour Name 		offset = QDF_MAC_ADDR_SIZE * 2;
898*5113495bSYour Name 		l3_hdr = data + ETHERNET_HDR_LEN;
899*5113495bSYour Name 	}
900*5113495bSYour Name 	ethertype = (data[offset] << 8) | data[offset + 1];
901*5113495bSYour Name 	if (ethertype == ETHERTYPE_IPV4) {
902*5113495bSYour Name 		offset = IPV4_HDR_OFFSET_PROTOCOL;
903*5113495bSYour Name 		if (l3_hdr[offset] == IP_PROTOCOL_IGMP) {
904*5113495bSYour Name 			ol_rx_notify(pdev->ctrl_pdev,
905*5113495bSYour Name 				     vdev->vdev_id,
906*5113495bSYour Name 				     peer->mac_addr.raw,
907*5113495bSYour Name 				     tid,
908*5113495bSYour Name 				     htt_rx_mpdu_desc_tsf32(pdev->htt_pdev,
909*5113495bSYour Name 							    rx_desc),
910*5113495bSYour Name 				     OL_RX_NOTIFY_IPV4_IGMP, msdu);
911*5113495bSYour Name 		}
912*5113495bSYour Name 	}
913*5113495bSYour Name }
914*5113495bSYour Name #endif
915*5113495bSYour Name 
916*5113495bSYour Name void
ol_rx_offload_deliver_ind_handler(ol_txrx_pdev_handle pdev,qdf_nbuf_t msg,uint16_t msdu_cnt)917*5113495bSYour Name ol_rx_offload_deliver_ind_handler(ol_txrx_pdev_handle pdev,
918*5113495bSYour Name 				  qdf_nbuf_t msg, uint16_t msdu_cnt)
919*5113495bSYour Name {
920*5113495bSYour Name 	int vdev_id, peer_id, tid;
921*5113495bSYour Name 	qdf_nbuf_t head_buf, tail_buf, buf;
922*5113495bSYour Name 	struct ol_txrx_peer_t *peer;
923*5113495bSYour Name 	uint8_t fw_desc;
924*5113495bSYour Name 	htt_pdev_handle htt_pdev = pdev->htt_pdev;
925*5113495bSYour Name 
926*5113495bSYour Name 	if (msdu_cnt > htt_rx_offload_msdu_cnt(htt_pdev)) {
927*5113495bSYour Name 		ol_txrx_err("invalid msdu_cnt=%u", msdu_cnt);
928*5113495bSYour Name 
929*5113495bSYour Name 		if (pdev->cfg.is_high_latency)
930*5113495bSYour Name 			htt_rx_desc_frame_free(htt_pdev, msg);
931*5113495bSYour Name 
932*5113495bSYour Name 		return;
933*5113495bSYour Name 	}
934*5113495bSYour Name 
935*5113495bSYour Name 	while (msdu_cnt) {
936*5113495bSYour Name 		if (!htt_rx_offload_msdu_pop(htt_pdev, msg, &vdev_id, &peer_id,
937*5113495bSYour Name 					&tid, &fw_desc, &head_buf, &tail_buf)) {
938*5113495bSYour Name 			peer = ol_txrx_peer_find_by_id(pdev, peer_id);
939*5113495bSYour Name 			if (peer) {
940*5113495bSYour Name 				ol_rx_data_process(peer, head_buf);
941*5113495bSYour Name 			} else {
942*5113495bSYour Name 				buf = head_buf;
943*5113495bSYour Name 				while (1) {
944*5113495bSYour Name 					qdf_nbuf_t next;
945*5113495bSYour Name 
946*5113495bSYour Name 					next = qdf_nbuf_next(buf);
947*5113495bSYour Name 					htt_rx_desc_frame_free(htt_pdev, buf);
948*5113495bSYour Name 					if (buf == tail_buf)
949*5113495bSYour Name 						break;
950*5113495bSYour Name 					buf = next;
951*5113495bSYour Name 				}
952*5113495bSYour Name 			}
953*5113495bSYour Name 		}
954*5113495bSYour Name 		msdu_cnt--;
955*5113495bSYour Name 	}
956*5113495bSYour Name 	htt_rx_msdu_buff_replenish(htt_pdev);
957*5113495bSYour Name }
958*5113495bSYour Name 
959*5113495bSYour Name void
ol_rx_send_mic_err_ind(struct ol_txrx_pdev_t * pdev,uint8_t vdev_id,uint8_t * peer_mac_addr,int tid,uint32_t tsf32,enum ol_rx_err_type err_type,qdf_nbuf_t rx_frame,uint64_t * pn,uint8_t key_id)960*5113495bSYour Name ol_rx_send_mic_err_ind(struct ol_txrx_pdev_t *pdev, uint8_t vdev_id,
961*5113495bSYour Name 		       uint8_t *peer_mac_addr, int tid, uint32_t tsf32,
962*5113495bSYour Name 		       enum ol_rx_err_type err_type, qdf_nbuf_t rx_frame,
963*5113495bSYour Name 		       uint64_t *pn, uint8_t key_id)
964*5113495bSYour Name {
965*5113495bSYour Name 	struct cdp_rx_mic_err_info mic_failure_info;
966*5113495bSYour Name 	qdf_ether_header_t *eth_hdr;
967*5113495bSYour Name 	struct ol_if_ops *tops = NULL;
968*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
969*5113495bSYour Name 	ol_txrx_soc_handle ol_txrx_soc = &soc->cdp_soc;
970*5113495bSYour Name 
971*5113495bSYour Name 	if (err_type != OL_RX_ERR_TKIP_MIC)
972*5113495bSYour Name 		return;
973*5113495bSYour Name 
974*5113495bSYour Name 	if (qdf_nbuf_len(rx_frame) < sizeof(*eth_hdr))
975*5113495bSYour Name 		return;
976*5113495bSYour Name 
977*5113495bSYour Name 	eth_hdr = (qdf_ether_header_t *)qdf_nbuf_data(rx_frame);
978*5113495bSYour Name 
979*5113495bSYour Name 	qdf_copy_macaddr((struct qdf_mac_addr *)&mic_failure_info.ta_mac_addr,
980*5113495bSYour Name 			 (struct qdf_mac_addr *)peer_mac_addr);
981*5113495bSYour Name 	qdf_copy_macaddr((struct qdf_mac_addr *)&mic_failure_info.da_mac_addr,
982*5113495bSYour Name 			 (struct qdf_mac_addr *)eth_hdr->ether_dhost);
983*5113495bSYour Name 	mic_failure_info.key_id = key_id;
984*5113495bSYour Name 	mic_failure_info.multicast =
985*5113495bSYour Name 		IEEE80211_IS_MULTICAST(eth_hdr->ether_dhost);
986*5113495bSYour Name 	qdf_mem_copy(mic_failure_info.tsc, pn, SIR_CIPHER_SEQ_CTR_SIZE);
987*5113495bSYour Name 	mic_failure_info.frame_type = cdp_rx_frame_type_802_3;
988*5113495bSYour Name 	mic_failure_info.data = NULL;
989*5113495bSYour Name 	mic_failure_info.vdev_id = vdev_id;
990*5113495bSYour Name 
991*5113495bSYour Name 	tops = ol_txrx_soc->ol_ops;
992*5113495bSYour Name 	if (tops->rx_mic_error)
993*5113495bSYour Name 		tops->rx_mic_error(soc->psoc, pdev->id, &mic_failure_info);
994*5113495bSYour Name }
995*5113495bSYour Name 
996*5113495bSYour Name void
ol_rx_mic_error_handler(ol_txrx_pdev_handle pdev,u_int8_t tid,u_int16_t peer_id,void * msdu_desc,qdf_nbuf_t msdu)997*5113495bSYour Name ol_rx_mic_error_handler(
998*5113495bSYour Name 	ol_txrx_pdev_handle pdev,
999*5113495bSYour Name 	u_int8_t tid,
1000*5113495bSYour Name 	u_int16_t peer_id,
1001*5113495bSYour Name 	void *msdu_desc,
1002*5113495bSYour Name 	qdf_nbuf_t msdu)
1003*5113495bSYour Name {
1004*5113495bSYour Name 	union htt_rx_pn_t pn = {0};
1005*5113495bSYour Name 	u_int8_t key_id = 0;
1006*5113495bSYour Name 
1007*5113495bSYour Name 	struct ol_txrx_peer_t *peer = NULL;
1008*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev = NULL;
1009*5113495bSYour Name 
1010*5113495bSYour Name 	if (pdev) {
1011*5113495bSYour Name 		TXRX_STATS_MSDU_INCR(pdev, rx.dropped_mic_err, msdu);
1012*5113495bSYour Name 		peer = ol_txrx_peer_find_by_id(pdev, peer_id);
1013*5113495bSYour Name 		if (peer) {
1014*5113495bSYour Name 			vdev = peer->vdev;
1015*5113495bSYour Name 			if (vdev) {
1016*5113495bSYour Name 				htt_rx_mpdu_desc_pn(vdev->pdev->htt_pdev,
1017*5113495bSYour Name 						    msdu_desc, &pn, 48);
1018*5113495bSYour Name 
1019*5113495bSYour Name 				if (htt_rx_msdu_desc_key_id(
1020*5113495bSYour Name 					vdev->pdev->htt_pdev, msdu_desc,
1021*5113495bSYour Name 					&key_id) == true) {
1022*5113495bSYour Name 					ol_rx_send_mic_err_ind(vdev->pdev,
1023*5113495bSYour Name 						vdev->vdev_id,
1024*5113495bSYour Name 						peer->mac_addr.raw, tid, 0,
1025*5113495bSYour Name 						OL_RX_ERR_TKIP_MIC, msdu,
1026*5113495bSYour Name 						&pn.pn48, key_id);
1027*5113495bSYour Name 				}
1028*5113495bSYour Name 			}
1029*5113495bSYour Name 		}
1030*5113495bSYour Name 		/* Pktlog */
1031*5113495bSYour Name 		ol_rx_send_pktlog_event(pdev, peer, msdu, 1);
1032*5113495bSYour Name 	}
1033*5113495bSYour Name }
1034*5113495bSYour Name 
1035*5113495bSYour Name #ifdef WLAN_PARTIAL_REORDER_OFFLOAD
1036*5113495bSYour Name /**
 * @brief Check the first msdu to decide whether the A-MSDU should be accepted.
1038*5113495bSYour Name  */
/*
 * Decide whether to accept or reject an rx A-MSDU based on the vdev's
 * privacy exemption list, ExcludeUnencrypted (drop_unenc) setting, and
 * the peer's security state.
 *
 * Returns FILTER_STATUS_ACCEPT (0) or FILTER_STATUS_REJECT (1).
 * NOTE(review): the declared return type is bool, so ACCEPT maps to
 * false and REJECT to true - callers presumably treat a true result as
 * "filtered out"; verify against call sites.
 */
static bool
ol_rx_filter(struct ol_txrx_vdev_t *vdev,
	     struct ol_txrx_peer_t *peer, qdf_nbuf_t msdu, void *rx_desc)
{
#define FILTER_STATUS_REJECT 1
#define FILTER_STATUS_ACCEPT 0
	uint8_t *wh;
	uint32_t offset = 0;
	uint16_t ether_type = 0;
	bool is_encrypted = false, is_mcast = false;
	uint8_t i;
	enum privacy_filter_packet_type packet_type =
		PRIVACY_FILTER_PACKET_UNICAST;
	ol_txrx_pdev_handle pdev = vdev->pdev;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	int sec_idx;

	/*
	 * Safemode must avoid the PrivacyExemptionList and
	 * ExcludeUnencrypted checking
	 */
	if (vdev->safemode)
		return FILTER_STATUS_ACCEPT;

	is_mcast = htt_rx_msdu_is_wlan_mcast(htt_pdev, rx_desc);
	/*
	 * Only parse the ethertype when there are privacy filters to match;
	 * otherwise ether_type stays 0 for the checks below.
	 */
	if (vdev->num_filters > 0) {
		if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
			offset = SIZEOF_80211_HDR +
				LLC_SNAP_HDR_OFFSET_ETHERTYPE;
		} else {
			offset = QDF_MAC_ADDR_SIZE * 2;
		}
		/* get header info from msdu */
		wh = qdf_nbuf_data(msdu);

		/* get ether type */
		ether_type = (wh[offset] << 8) | wh[offset + 1];
		/* get packet type */
		if (true == is_mcast)
			packet_type = PRIVACY_FILTER_PACKET_MULTICAST;
		else
			packet_type = PRIVACY_FILTER_PACKET_UNICAST;
	}
	/* get encrypt info */
	is_encrypted = htt_rx_mpdu_is_encrypted(htt_pdev, rx_desc);
#ifdef ATH_SUPPORT_WAPI
	if ((true == is_encrypted) && (ETHERTYPE_WAI == ether_type)) {
		/*
		 * We expect the WAI frames to be always unencrypted when
		 * the UMAC gets it
		 */
		return FILTER_STATUS_REJECT;
	}
#endif /* ATH_SUPPORT_WAPI */

	/*
	 * Walk the privacy exemption list.  The first entry that matches
	 * both the ethertype and the packet type decides the outcome;
	 * filter types other than ALWAYS/KEY_UNAVAILABLE end the search
	 * and fall through to the generic checks below.
	 */
	for (i = 0; i < vdev->num_filters; i++) {
		enum privacy_filter filter_type;
		enum privacy_filter_packet_type filter_packet_type;

		/* skip if the ether type does not match */
		if (vdev->privacy_filters[i].ether_type != ether_type)
			continue;

		/* skip if the packet type does not match */
		filter_packet_type = vdev->privacy_filters[i].packet_type;
		if (filter_packet_type != packet_type &&
		    filter_packet_type != PRIVACY_FILTER_PACKET_BOTH) {
			continue;
		}

		filter_type = vdev->privacy_filters[i].filter_type;
		if (filter_type == PRIVACY_FILTER_ALWAYS) {
			/*
			 * In this case, we accept the frame if and only if
			 * it was originally NOT encrypted.
			 */
			if (true == is_encrypted)
				return FILTER_STATUS_REJECT;
			else
				return FILTER_STATUS_ACCEPT;

		} else if (filter_type == PRIVACY_FILTER_KEY_UNAVAILABLE) {
			/*
			 * In this case, we reject the frame if it was
			 * originally NOT encrypted but we have the key mapping
			 * key for this frame.
			 */
			if (!is_encrypted &&
			    !is_mcast &&
			    (peer->security[txrx_sec_ucast].sec_type !=
			     htt_sec_type_none) &&
			    (peer->keyinstalled || !ETHERTYPE_IS_EAPOL_WAPI(
				    ether_type))) {
				return FILTER_STATUS_REJECT;
			} else {
				return FILTER_STATUS_ACCEPT;
			}
		} else {
			/*
			 * The privacy exemption does not apply to this frame.
			 */
			break;
		}
	}

	/*
	 * If the privacy exemption list does not apply to the frame,
	 * check ExcludeUnencrypted.
	 * If ExcludeUnencrypted is not set, or if this was originally
	 * an encrypted frame, it will be accepted.
	 */
	if (!vdev->drop_unenc || (true == is_encrypted))
		return FILTER_STATUS_ACCEPT;

	/*
	 *  If this is an open connection, it will be accepted.
	 */
	sec_idx = (true == is_mcast) ? txrx_sec_mcast : txrx_sec_ucast;
	if (peer->security[sec_idx].sec_type == htt_sec_type_none)
		return FILTER_STATUS_ACCEPT;

	/* Unencrypted frame on a secured connection with drop_unenc set:
	 * record the privacy error before rejecting.
	 */
	if ((false == is_encrypted) && vdev->drop_unenc) {
		OL_RX_ERR_STATISTICS(pdev, vdev, OL_RX_ERR_PRIVACY,
				     pdev->sec_types[htt_sec_type_none],
				     is_mcast);
	}
	return FILTER_STATUS_REJECT;
}
1167*5113495bSYour Name #endif
1168*5113495bSYour Name 
1169*5113495bSYour Name #ifdef WLAN_FEATURE_TSF_PLUS
1170*5113495bSYour Name #ifdef CONFIG_HL_SUPPORT
void ol_rx_timestamp(struct cdp_cfg *cfg_pdev,
		     void *rx_desc, qdf_nbuf_t msdu)
{
	struct htt_rx_ppdu_desc_t *ppdu_desc;

	/* Only stamp frames when PTP rx optimization is enabled and we
	 * actually have a descriptor and a buffer to work with.
	 */
	if (!ol_cfg_is_ptp_rx_opt_enabled(cfg_pdev) || !rx_desc || !msdu)
		return;

	/*
	 * In HL the ppdu descriptor precedes the rx descriptor inside the
	 * indication message; step back from rx_desc to reach it.
	 */
	ppdu_desc = (struct htt_rx_ppdu_desc_t *)((uint8_t *)(rx_desc) -
			HTT_RX_IND_HL_BYTES + HTT_RX_IND_HDR_PREFIX_BYTES);
	msdu->tstamp = ns_to_ktime((u_int64_t)ppdu_desc->tsf32 *
				   NSEC_PER_USEC);
}
1187*5113495bSYour Name 
ol_rx_timestamp_update(ol_txrx_pdev_handle pdev,qdf_nbuf_t head_msdu,qdf_nbuf_t tail_msdu)1188*5113495bSYour Name static inline void ol_rx_timestamp_update(ol_txrx_pdev_handle pdev,
1189*5113495bSYour Name 					  qdf_nbuf_t head_msdu,
1190*5113495bSYour Name 					  qdf_nbuf_t tail_msdu)
1191*5113495bSYour Name {
1192*5113495bSYour Name 	qdf_nbuf_t loop_msdu;
1193*5113495bSYour Name 	struct htt_host_rx_desc_base *rx_desc;
1194*5113495bSYour Name 
1195*5113495bSYour Name 	loop_msdu = head_msdu;
1196*5113495bSYour Name 	while (loop_msdu) {
1197*5113495bSYour Name 		rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, loop_msdu);
1198*5113495bSYour Name 		ol_rx_timestamp(pdev->ctrl_pdev, rx_desc, loop_msdu);
1199*5113495bSYour Name 		loop_msdu = qdf_nbuf_next(loop_msdu);
1200*5113495bSYour Name 	}
1201*5113495bSYour Name }
1202*5113495bSYour Name #else
ol_rx_timestamp(struct cdp_cfg * cfg_pdev,void * rx_desc,qdf_nbuf_t msdu)1203*5113495bSYour Name void ol_rx_timestamp(struct cdp_cfg *cfg_pdev,
1204*5113495bSYour Name 		     void *rx_desc, qdf_nbuf_t msdu)
1205*5113495bSYour Name {
1206*5113495bSYour Name 	struct htt_host_rx_desc_base *rx_mpdu_desc = rx_desc;
1207*5113495bSYour Name 	uint32_t tsf64_low32, tsf64_high32;
1208*5113495bSYour Name 	uint64_t tsf64, tsf64_ns;
1209*5113495bSYour Name 
1210*5113495bSYour Name 	if (!ol_cfg_is_ptp_rx_opt_enabled(cfg_pdev))
1211*5113495bSYour Name 		return;
1212*5113495bSYour Name 
1213*5113495bSYour Name 	if (!rx_mpdu_desc || !msdu)
1214*5113495bSYour Name 		return;
1215*5113495bSYour Name 
1216*5113495bSYour Name 	tsf64_low32 = rx_mpdu_desc->ppdu_end.wb_timestamp_lower_32;
1217*5113495bSYour Name 	tsf64_high32 = rx_mpdu_desc->ppdu_end.wb_timestamp_upper_32;
1218*5113495bSYour Name 
1219*5113495bSYour Name 	tsf64 = (uint64_t)tsf64_high32 << 32 | tsf64_low32;
1220*5113495bSYour Name 	if (tsf64 * NSEC_PER_USEC < tsf64)
1221*5113495bSYour Name 		tsf64_ns = 0;
1222*5113495bSYour Name 	else
1223*5113495bSYour Name 		tsf64_ns = tsf64 * NSEC_PER_USEC;
1224*5113495bSYour Name 
1225*5113495bSYour Name 	msdu->tstamp = ns_to_ktime(tsf64_ns);
1226*5113495bSYour Name }
1227*5113495bSYour Name 
1228*5113495bSYour Name /**
1229*5113495bSYour Name  * ol_rx_timestamp_update() - update msdu tsf64 timestamp
1230*5113495bSYour Name  * @pdev: pointer to txrx handle
1231*5113495bSYour Name  * @head_msdu: pointer to head msdu
1232*5113495bSYour Name  * @tail_msdu: pointer to tail msdu
1233*5113495bSYour Name  *
1234*5113495bSYour Name  * Return: none
1235*5113495bSYour Name  */
ol_rx_timestamp_update(ol_txrx_pdev_handle pdev,qdf_nbuf_t head_msdu,qdf_nbuf_t tail_msdu)1236*5113495bSYour Name static inline void ol_rx_timestamp_update(ol_txrx_pdev_handle pdev,
1237*5113495bSYour Name 					  qdf_nbuf_t head_msdu,
1238*5113495bSYour Name 					  qdf_nbuf_t tail_msdu)
1239*5113495bSYour Name {
1240*5113495bSYour Name 	qdf_nbuf_t loop_msdu;
1241*5113495bSYour Name 	uint64_t hostime, detlahostime, tsf64_time;
1242*5113495bSYour Name 	struct htt_host_rx_desc_base *rx_desc;
1243*5113495bSYour Name 
1244*5113495bSYour Name 	if (!ol_cfg_is_ptp_rx_opt_enabled(pdev->ctrl_pdev))
1245*5113495bSYour Name 		return;
1246*5113495bSYour Name 
1247*5113495bSYour Name 	if (!tail_msdu)
1248*5113495bSYour Name 		return;
1249*5113495bSYour Name 
1250*5113495bSYour Name 	hostime = ktime_get_ns();
1251*5113495bSYour Name 	rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, tail_msdu);
1252*5113495bSYour Name 	if (rx_desc->ppdu_end.wb_timestamp_lower_32 == 0 &&
1253*5113495bSYour Name 	    rx_desc->ppdu_end.wb_timestamp_upper_32 == 0) {
1254*5113495bSYour Name 		detlahostime = hostime - pdev->last_host_time;
1255*5113495bSYour Name 		do_div(detlahostime, NSEC_PER_USEC);
1256*5113495bSYour Name 		tsf64_time = pdev->last_tsf64_time + detlahostime;
1257*5113495bSYour Name 
1258*5113495bSYour Name 		rx_desc->ppdu_end.wb_timestamp_lower_32 =
1259*5113495bSYour Name 						tsf64_time & 0xFFFFFFFF;
1260*5113495bSYour Name 		rx_desc->ppdu_end.wb_timestamp_upper_32 = tsf64_time >> 32;
1261*5113495bSYour Name 	} else {
1262*5113495bSYour Name 		pdev->last_host_time = hostime;
1263*5113495bSYour Name 		pdev->last_tsf64_time =
1264*5113495bSYour Name 		  (uint64_t)rx_desc->ppdu_end.wb_timestamp_upper_32 << 32 |
1265*5113495bSYour Name 		  rx_desc->ppdu_end.wb_timestamp_lower_32;
1266*5113495bSYour Name 	}
1267*5113495bSYour Name 
1268*5113495bSYour Name 	loop_msdu = head_msdu;
1269*5113495bSYour Name 	while (loop_msdu) {
1270*5113495bSYour Name 		ol_rx_timestamp(pdev->ctrl_pdev, rx_desc, loop_msdu);
1271*5113495bSYour Name 		loop_msdu = qdf_nbuf_next(loop_msdu);
1272*5113495bSYour Name 	}
1273*5113495bSYour Name }
1274*5113495bSYour Name #endif
1275*5113495bSYour Name #else
ol_rx_timestamp(struct cdp_cfg * cfg_pdev,void * rx_desc,qdf_nbuf_t msdu)1276*5113495bSYour Name void ol_rx_timestamp(struct cdp_cfg *cfg_pdev,
1277*5113495bSYour Name 		     void *rx_desc, qdf_nbuf_t msdu)
1278*5113495bSYour Name {
1279*5113495bSYour Name }
1280*5113495bSYour Name 
ol_rx_timestamp_update(ol_txrx_pdev_handle pdev,qdf_nbuf_t head_msdu,qdf_nbuf_t tail_msdu)1281*5113495bSYour Name static inline void ol_rx_timestamp_update(ol_txrx_pdev_handle pdev,
1282*5113495bSYour Name 					  qdf_nbuf_t head_msdu,
1283*5113495bSYour Name 					  qdf_nbuf_t tail_msdu)
1284*5113495bSYour Name {
1285*5113495bSYour Name }
1286*5113495bSYour Name #endif
1287*5113495bSYour Name 
1288*5113495bSYour Name #ifdef WLAN_FEATURE_DSRC
1289*5113495bSYour Name static inline
ol_rx_ocb_prepare_rx_stats_header(struct ol_txrx_vdev_t * vdev,struct ol_txrx_peer_t * peer,qdf_nbuf_t msdu)1290*5113495bSYour Name void ol_rx_ocb_prepare_rx_stats_header(struct ol_txrx_vdev_t *vdev,
1291*5113495bSYour Name 				       struct ol_txrx_peer_t *peer,
1292*5113495bSYour Name 				       qdf_nbuf_t msdu)
1293*5113495bSYour Name {
1294*5113495bSYour Name 	int i;
1295*5113495bSYour Name 	struct ol_txrx_ocb_chan_info *chan_info = 0;
1296*5113495bSYour Name 	int packet_freq = peer->last_pkt_center_freq;
1297*5113495bSYour Name 
1298*5113495bSYour Name 	for (i = 0; i < vdev->ocb_channel_count; i++) {
1299*5113495bSYour Name 		if (vdev->ocb_channel_info[i].chan_freq == packet_freq) {
1300*5113495bSYour Name 			chan_info = &vdev->ocb_channel_info[i];
1301*5113495bSYour Name 			break;
1302*5113495bSYour Name 		}
1303*5113495bSYour Name 	}
1304*5113495bSYour Name 
1305*5113495bSYour Name 	if (!chan_info || !chan_info->disable_rx_stats_hdr) {
1306*5113495bSYour Name 		qdf_ether_header_t eth_header = { {0} };
1307*5113495bSYour Name 		struct ocb_rx_stats_hdr_t rx_header = {0};
1308*5113495bSYour Name 
1309*5113495bSYour Name 		/*
1310*5113495bSYour Name 		 * Construct the RX stats header and
1311*5113495bSYour Name 		 * push that to the frontof the packet.
1312*5113495bSYour Name 		 */
1313*5113495bSYour Name 		rx_header.version = 1;
1314*5113495bSYour Name 		rx_header.length = sizeof(rx_header);
1315*5113495bSYour Name 		rx_header.channel_freq = peer->last_pkt_center_freq;
1316*5113495bSYour Name 		rx_header.rssi_cmb = peer->last_pkt_rssi_cmb;
1317*5113495bSYour Name 		qdf_mem_copy(rx_header.rssi, peer->last_pkt_rssi,
1318*5113495bSYour Name 			     sizeof(rx_header.rssi));
1319*5113495bSYour Name 
1320*5113495bSYour Name 		if (peer->last_pkt_legacy_rate_sel)
1321*5113495bSYour Name 			rx_header.datarate = 0xFF;
1322*5113495bSYour Name 		else if (peer->last_pkt_legacy_rate == 0x8)
1323*5113495bSYour Name 			rx_header.datarate = 6;
1324*5113495bSYour Name 		else if (peer->last_pkt_legacy_rate == 0x9)
1325*5113495bSYour Name 			rx_header.datarate = 4;
1326*5113495bSYour Name 		else if (peer->last_pkt_legacy_rate == 0xA)
1327*5113495bSYour Name 			rx_header.datarate = 2;
1328*5113495bSYour Name 		else if (peer->last_pkt_legacy_rate == 0xB)
1329*5113495bSYour Name 			rx_header.datarate = 0;
1330*5113495bSYour Name 		else if (peer->last_pkt_legacy_rate == 0xC)
1331*5113495bSYour Name 			rx_header.datarate = 7;
1332*5113495bSYour Name 		else if (peer->last_pkt_legacy_rate == 0xD)
1333*5113495bSYour Name 			rx_header.datarate = 5;
1334*5113495bSYour Name 		else if (peer->last_pkt_legacy_rate == 0xE)
1335*5113495bSYour Name 			rx_header.datarate = 3;
1336*5113495bSYour Name 		else if (peer->last_pkt_legacy_rate == 0xF)
1337*5113495bSYour Name 			rx_header.datarate = 1;
1338*5113495bSYour Name 		else
1339*5113495bSYour Name 			rx_header.datarate = 0xFF;
1340*5113495bSYour Name 
1341*5113495bSYour Name 		rx_header.timestamp_microsec =
1342*5113495bSYour Name 			 peer->last_pkt_timestamp_microsec;
1343*5113495bSYour Name 		rx_header.timestamp_submicrosec =
1344*5113495bSYour Name 			 peer->last_pkt_timestamp_submicrosec;
1345*5113495bSYour Name 		rx_header.tsf32 = peer->last_pkt_tsf;
1346*5113495bSYour Name 		rx_header.ext_tid = peer->last_pkt_tid;
1347*5113495bSYour Name 
1348*5113495bSYour Name 		qdf_nbuf_push_head(msdu, sizeof(rx_header));
1349*5113495bSYour Name 		qdf_mem_copy(qdf_nbuf_data(msdu),
1350*5113495bSYour Name 			     &rx_header, sizeof(rx_header));
1351*5113495bSYour Name 
1352*5113495bSYour Name 		/*
1353*5113495bSYour Name 		 * Construct the ethernet header with
1354*5113495bSYour Name 		 * type 0x8152 and push that to the
1355*5113495bSYour Name 		 * front of the packet to indicate the
1356*5113495bSYour Name 		 * RX stats header.
1357*5113495bSYour Name 		 */
1358*5113495bSYour Name 		eth_header.ether_type = QDF_SWAP_U16(ETHERTYPE_OCB_RX);
1359*5113495bSYour Name 		qdf_nbuf_push_head(msdu, sizeof(eth_header));
1360*5113495bSYour Name 		qdf_mem_copy(qdf_nbuf_data(msdu), &eth_header,
1361*5113495bSYour Name 			     sizeof(eth_header));
1362*5113495bSYour Name 	}
1363*5113495bSYour Name }
1364*5113495bSYour Name #else
1365*5113495bSYour Name static inline
ol_rx_ocb_prepare_rx_stats_header(struct ol_txrx_vdev_t * vdev,struct ol_txrx_peer_t * peer,qdf_nbuf_t msdu)1366*5113495bSYour Name void ol_rx_ocb_prepare_rx_stats_header(struct ol_txrx_vdev_t *vdev,
1367*5113495bSYour Name 				       struct ol_txrx_peer_t *peer,
1368*5113495bSYour Name 				       qdf_nbuf_t msdu)
1369*5113495bSYour Name {
1370*5113495bSYour Name }
1371*5113495bSYour Name #endif
1372*5113495bSYour Name 
1373*5113495bSYour Name #ifdef WLAN_PARTIAL_REORDER_OFFLOAD
1374*5113495bSYour Name void
ol_rx_deliver(struct ol_txrx_vdev_t * vdev,struct ol_txrx_peer_t * peer,unsigned int tid,qdf_nbuf_t msdu_list)1375*5113495bSYour Name ol_rx_deliver(struct ol_txrx_vdev_t *vdev,
1376*5113495bSYour Name 	      struct ol_txrx_peer_t *peer, unsigned int tid,
1377*5113495bSYour Name 	      qdf_nbuf_t msdu_list)
1378*5113495bSYour Name {
1379*5113495bSYour Name 	ol_txrx_pdev_handle pdev = vdev->pdev;
1380*5113495bSYour Name 	htt_pdev_handle htt_pdev = pdev->htt_pdev;
1381*5113495bSYour Name 	qdf_nbuf_t deliver_list_head = NULL;
1382*5113495bSYour Name 	qdf_nbuf_t deliver_list_tail = NULL;
1383*5113495bSYour Name 	qdf_nbuf_t msdu;
1384*5113495bSYour Name 	bool filter = false;
1385*5113495bSYour Name #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
1386*5113495bSYour Name 	struct ol_rx_decap_info_t info;
1387*5113495bSYour Name 
1388*5113495bSYour Name 	qdf_mem_zero(&info, sizeof(info));
1389*5113495bSYour Name #endif
1390*5113495bSYour Name 
1391*5113495bSYour Name 	msdu = msdu_list;
1392*5113495bSYour Name 	/*
1393*5113495bSYour Name 	 * Check each MSDU to see whether it requires special handling,
1394*5113495bSYour Name 	 * and free each MSDU's rx descriptor
1395*5113495bSYour Name 	 */
1396*5113495bSYour Name 	while (msdu) {
1397*5113495bSYour Name 		void *rx_desc;
1398*5113495bSYour Name 		int discard, inspect, dummy_fwd;
1399*5113495bSYour Name 		qdf_nbuf_t next = qdf_nbuf_next(msdu);
1400*5113495bSYour Name 
1401*5113495bSYour Name 		rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, msdu);
1402*5113495bSYour Name 		/* for HL, point to payload right now*/
1403*5113495bSYour Name 		if (pdev->cfg.is_high_latency) {
1404*5113495bSYour Name 			qdf_nbuf_pull_head(msdu,
1405*5113495bSYour Name 				htt_rx_msdu_rx_desc_size_hl(htt_pdev, rx_desc));
1406*5113495bSYour Name 		}
1407*5113495bSYour Name 
1408*5113495bSYour Name #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
1409*5113495bSYour Name 		info.is_msdu_cmpl_mpdu =
1410*5113495bSYour Name 			htt_rx_msdu_desc_completes_mpdu(htt_pdev, rx_desc);
1411*5113495bSYour Name 		info.is_first_subfrm =
1412*5113495bSYour Name 			htt_rx_msdu_first_msdu_flag(htt_pdev, rx_desc);
1413*5113495bSYour Name 		if (OL_RX_DECAP(vdev, peer, msdu, &info) != A_OK) {
1414*5113495bSYour Name 			discard = 1;
1415*5113495bSYour Name 			ol_txrx_dbg(
1416*5113495bSYour Name 				"decap error %pK from peer %pK ("QDF_MAC_ADDR_FMT") len %d\n",
1417*5113495bSYour Name 				msdu, peer,
1418*5113495bSYour Name 				QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1419*5113495bSYour Name 				qdf_nbuf_len(msdu));
1420*5113495bSYour Name 			goto DONE;
1421*5113495bSYour Name 		}
1422*5113495bSYour Name #endif
1423*5113495bSYour Name 		htt_rx_msdu_actions(pdev->htt_pdev, rx_desc, &discard,
1424*5113495bSYour Name 				    &dummy_fwd, &inspect);
1425*5113495bSYour Name 		if (inspect)
1426*5113495bSYour Name 			ol_rx_inspect(vdev, peer, tid, msdu, rx_desc);
1427*5113495bSYour Name 
1428*5113495bSYour Name 		/*
1429*5113495bSYour Name 		 * Check the first msdu in the mpdu, if it will be filtered out,
1430*5113495bSYour Name 		 * then discard the entire mpdu.
1431*5113495bSYour Name 		 */
1432*5113495bSYour Name 		if (htt_rx_msdu_first_msdu_flag(htt_pdev, rx_desc))
1433*5113495bSYour Name 			filter = ol_rx_filter(vdev, peer, msdu, rx_desc);
1434*5113495bSYour Name 
1435*5113495bSYour Name #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
1436*5113495bSYour Name DONE:
1437*5113495bSYour Name #endif
1438*5113495bSYour Name 		htt_rx_msdu_desc_free(htt_pdev, msdu);
1439*5113495bSYour Name 		if (discard || (true == filter)) {
1440*5113495bSYour Name 			ol_txrx_frms_dump("rx discarding:",
1441*5113495bSYour Name 					  pdev, deliver_list_head,
1442*5113495bSYour Name 					  ol_txrx_frm_dump_tcp_seq |
1443*5113495bSYour Name 					  ol_txrx_frm_dump_contents,
1444*5113495bSYour Name 					  0 /* don't print contents */);
1445*5113495bSYour Name 			qdf_nbuf_free(msdu);
1446*5113495bSYour Name 			/*
1447*5113495bSYour Name 			 * If discarding packet is last packet of the delivery
1448*5113495bSYour Name 			 * list, NULL terminator should be added
1449*5113495bSYour Name 			 * for delivery list.
1450*5113495bSYour Name 			 */
1451*5113495bSYour Name 			if (!next && deliver_list_head) {
1452*5113495bSYour Name 				/* add NULL terminator */
1453*5113495bSYour Name 				qdf_nbuf_set_next(deliver_list_tail, NULL);
1454*5113495bSYour Name 			}
1455*5113495bSYour Name 		} else {
1456*5113495bSYour Name 			/*
1457*5113495bSYour Name 			 *  If this is for OCB,
1458*5113495bSYour Name 			 *  then prepend the RX stats header.
1459*5113495bSYour Name 			 */
1460*5113495bSYour Name 			if (vdev->opmode == wlan_op_mode_ocb)
1461*5113495bSYour Name 				ol_rx_ocb_prepare_rx_stats_header(vdev, peer,
1462*5113495bSYour Name 								  msdu);
1463*5113495bSYour Name 
1464*5113495bSYour Name 			OL_RX_PEER_STATS_UPDATE(peer, msdu);
1465*5113495bSYour Name 			OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, rx_desc,
1466*5113495bSYour Name 					       OL_RX_ERR_NONE);
1467*5113495bSYour Name 			TXRX_STATS_MSDU_INCR(vdev->pdev, rx.delivered, msdu);
1468*5113495bSYour Name 
1469*5113495bSYour Name 			ol_rx_timestamp(pdev->ctrl_pdev, rx_desc, msdu);
1470*5113495bSYour Name 			OL_TXRX_LIST_APPEND(deliver_list_head,
1471*5113495bSYour Name 					    deliver_list_tail, msdu);
1472*5113495bSYour Name 			QDF_NBUF_CB_DP_TRACE_PRINT(msdu) = false;
1473*5113495bSYour Name 			qdf_dp_trace_set_track(msdu, QDF_RX);
1474*5113495bSYour Name 		}
1475*5113495bSYour Name 		msdu = next;
1476*5113495bSYour Name 	}
1477*5113495bSYour Name 	/* sanity check - are there any frames left to give to the OS shim? */
1478*5113495bSYour Name 	if (!deliver_list_head)
1479*5113495bSYour Name 		return;
1480*5113495bSYour Name 
1481*5113495bSYour Name 	ol_txrx_frms_dump("rx delivering:",
1482*5113495bSYour Name 			  pdev, deliver_list_head,
1483*5113495bSYour Name 			  ol_txrx_frm_dump_tcp_seq | ol_txrx_frm_dump_contents,
1484*5113495bSYour Name 			  0 /* don't print contents */);
1485*5113495bSYour Name 
1486*5113495bSYour Name 	ol_rx_data_process(peer, deliver_list_head);
1487*5113495bSYour Name }
1488*5113495bSYour Name #endif
1489*5113495bSYour Name 
1490*5113495bSYour Name void
ol_rx_discard(struct ol_txrx_vdev_t * vdev,struct ol_txrx_peer_t * peer,unsigned int tid,qdf_nbuf_t msdu_list)1491*5113495bSYour Name ol_rx_discard(struct ol_txrx_vdev_t *vdev,
1492*5113495bSYour Name 	      struct ol_txrx_peer_t *peer, unsigned int tid,
1493*5113495bSYour Name 	      qdf_nbuf_t msdu_list)
1494*5113495bSYour Name {
1495*5113495bSYour Name 	while (msdu_list) {
1496*5113495bSYour Name 		qdf_nbuf_t msdu = msdu_list;
1497*5113495bSYour Name 
1498*5113495bSYour Name 		msdu_list = qdf_nbuf_next(msdu_list);
1499*5113495bSYour Name 		ol_txrx_dbg("discard rx %pK", msdu);
1500*5113495bSYour Name 		qdf_nbuf_free(msdu);
1501*5113495bSYour Name 	}
1502*5113495bSYour Name }
1503*5113495bSYour Name 
ol_rx_peer_init(struct ol_txrx_pdev_t * pdev,struct ol_txrx_peer_t * peer)1504*5113495bSYour Name void ol_rx_peer_init(struct ol_txrx_pdev_t *pdev, struct ol_txrx_peer_t *peer)
1505*5113495bSYour Name {
1506*5113495bSYour Name 	uint8_t tid;
1507*5113495bSYour Name 
1508*5113495bSYour Name 	for (tid = 0; tid < OL_TXRX_NUM_EXT_TIDS; tid++) {
1509*5113495bSYour Name 		ol_rx_reorder_init(&peer->tids_rx_reorder[tid], tid);
1510*5113495bSYour Name 
1511*5113495bSYour Name 		/* invalid sequence number */
1512*5113495bSYour Name 		peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;
1513*5113495bSYour Name 		/* invalid reorder index number */
1514*5113495bSYour Name 		peer->tids_next_rel_idx[tid] = INVALID_REORDER_INDEX;
1515*5113495bSYour Name 
1516*5113495bSYour Name 	}
1517*5113495bSYour Name 	/*
1518*5113495bSYour Name 	 * Set security defaults: no PN check, no security.
1519*5113495bSYour Name 	 * The target may send a HTT SEC_IND message to overwrite
1520*5113495bSYour Name 	 * these defaults.
1521*5113495bSYour Name 	 */
1522*5113495bSYour Name 	peer->security[txrx_sec_ucast].sec_type =
1523*5113495bSYour Name 		peer->security[txrx_sec_mcast].sec_type = htt_sec_type_none;
1524*5113495bSYour Name 	peer->keyinstalled = 0;
1525*5113495bSYour Name 
1526*5113495bSYour Name 	peer->last_assoc_rcvd = 0;
1527*5113495bSYour Name 	peer->last_disassoc_rcvd = 0;
1528*5113495bSYour Name 	peer->last_deauth_rcvd = 0;
1529*5113495bSYour Name 
1530*5113495bSYour Name 	qdf_atomic_init(&peer->fw_pn_check);
1531*5113495bSYour Name }
1532*5113495bSYour Name 
1533*5113495bSYour Name void
ol_rx_peer_cleanup(struct ol_txrx_vdev_t * vdev,struct ol_txrx_peer_t * peer)1534*5113495bSYour Name ol_rx_peer_cleanup(struct ol_txrx_vdev_t *vdev, struct ol_txrx_peer_t *peer)
1535*5113495bSYour Name {
1536*5113495bSYour Name 	peer->keyinstalled = 0;
1537*5113495bSYour Name 	peer->last_assoc_rcvd = 0;
1538*5113495bSYour Name 	peer->last_disassoc_rcvd = 0;
1539*5113495bSYour Name 	peer->last_deauth_rcvd = 0;
1540*5113495bSYour Name 	ol_rx_reorder_peer_cleanup(vdev, peer);
1541*5113495bSYour Name }
1542*5113495bSYour Name 
1543*5113495bSYour Name /*
1544*5113495bSYour Name  * Free frames including both rx descriptors and buffers
1545*5113495bSYour Name  */
ol_rx_frames_free(htt_pdev_handle htt_pdev,qdf_nbuf_t frames)1546*5113495bSYour Name void ol_rx_frames_free(htt_pdev_handle htt_pdev, qdf_nbuf_t frames)
1547*5113495bSYour Name {
1548*5113495bSYour Name 	qdf_nbuf_t next, frag = frames;
1549*5113495bSYour Name 
1550*5113495bSYour Name 	while (frag) {
1551*5113495bSYour Name 		next = qdf_nbuf_next(frag);
1552*5113495bSYour Name 		htt_rx_desc_frame_free(htt_pdev, frag);
1553*5113495bSYour Name 		frag = next;
1554*5113495bSYour Name 	}
1555*5113495bSYour Name }
1556*5113495bSYour Name 
1557*5113495bSYour Name #ifdef WLAN_FULL_REORDER_OFFLOAD
1558*5113495bSYour Name void
ol_rx_in_order_indication_handler(ol_txrx_pdev_handle pdev,qdf_nbuf_t rx_ind_msg,uint16_t peer_id,uint8_t tid,uint8_t is_offload)1559*5113495bSYour Name ol_rx_in_order_indication_handler(ol_txrx_pdev_handle pdev,
1560*5113495bSYour Name 				  qdf_nbuf_t rx_ind_msg,
1561*5113495bSYour Name 				  uint16_t peer_id,
1562*5113495bSYour Name 				  uint8_t tid, uint8_t is_offload)
1563*5113495bSYour Name {
1564*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev = NULL;
1565*5113495bSYour Name 	struct ol_txrx_peer_t *peer = NULL;
1566*5113495bSYour Name 	struct ol_txrx_peer_t *peer_head = NULL;
1567*5113495bSYour Name 	htt_pdev_handle htt_pdev = NULL;
1568*5113495bSYour Name 	int status;
1569*5113495bSYour Name 	qdf_nbuf_t head_msdu = NULL, tail_msdu = NULL;
1570*5113495bSYour Name 	uint8_t *rx_ind_data;
1571*5113495bSYour Name 	uint32_t *msg_word;
1572*5113495bSYour Name 	uint32_t msdu_count;
1573*5113495bSYour Name 	uint8_t pktlog_bit;
1574*5113495bSYour Name 	uint32_t filled = 0;
1575*5113495bSYour Name 	uint8_t bssid[QDF_MAC_ADDR_SIZE];
1576*5113495bSYour Name 	bool offloaded_pkt;
1577*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
1578*5113495bSYour Name 
1579*5113495bSYour Name 	if (qdf_unlikely(!soc))
1580*5113495bSYour Name 		return;
1581*5113495bSYour Name 
1582*5113495bSYour Name 	if (tid >= OL_TXRX_NUM_EXT_TIDS) {
1583*5113495bSYour Name 		ol_txrx_err("invalid tid, %u", tid);
1584*5113495bSYour Name 		WARN_ON(1);
1585*5113495bSYour Name 		return;
1586*5113495bSYour Name 	}
1587*5113495bSYour Name 
1588*5113495bSYour Name 	if (pdev) {
1589*5113495bSYour Name 		if (qdf_unlikely(QDF_GLOBAL_MONITOR_MODE == cds_get_conparam()))
1590*5113495bSYour Name 			peer = pdev->self_peer;
1591*5113495bSYour Name 		else
1592*5113495bSYour Name 			peer = ol_txrx_peer_find_by_id(pdev, peer_id);
1593*5113495bSYour Name 		htt_pdev = pdev->htt_pdev;
1594*5113495bSYour Name 	} else {
1595*5113495bSYour Name 		ol_txrx_err("Invalid pdev passed!");
1596*5113495bSYour Name 		qdf_assert_always(pdev);
1597*5113495bSYour Name 		return;
1598*5113495bSYour Name 	}
1599*5113495bSYour Name 
1600*5113495bSYour Name #if defined(HELIUMPLUS_DEBUG)
1601*5113495bSYour Name 	qdf_print("rx_ind_msg 0x%pK peer_id %d tid %d is_offload %d",
1602*5113495bSYour Name 		  rx_ind_msg, peer_id, tid, is_offload);
1603*5113495bSYour Name #endif
1604*5113495bSYour Name 
1605*5113495bSYour Name 	pktlog_bit = (htt_rx_amsdu_rx_in_order_get_pktlog(rx_ind_msg) == 0x01);
1606*5113495bSYour Name 	rx_ind_data = qdf_nbuf_data(rx_ind_msg);
1607*5113495bSYour Name 	msg_word = (uint32_t *)rx_ind_data;
1608*5113495bSYour Name 	/* Get the total number of MSDUs */
1609*5113495bSYour Name 	msdu_count = HTT_RX_IN_ORD_PADDR_IND_MSDU_CNT_GET(*(msg_word + 1));
1610*5113495bSYour Name 
1611*5113495bSYour Name 	ol_rx_ind_record_event(msdu_count, OL_RX_INDICATION_POP_START);
1612*5113495bSYour Name 
1613*5113495bSYour Name 	/*
1614*5113495bSYour Name 	 * Get a linked list of the MSDUs in the rx in order indication.
1615*5113495bSYour Name 	 * This also attaches each rx MSDU descriptor to the
1616*5113495bSYour Name 	 * corresponding rx MSDU network buffer.
1617*5113495bSYour Name 	 */
1618*5113495bSYour Name 	status = htt_rx_amsdu_pop(htt_pdev, rx_ind_msg, &head_msdu,
1619*5113495bSYour Name 				  &tail_msdu, &msdu_count);
1620*5113495bSYour Name 	ol_rx_ind_record_event(status, OL_RX_INDICATION_POP_END);
1621*5113495bSYour Name 
1622*5113495bSYour Name 	if (qdf_unlikely(0 == status)) {
1623*5113495bSYour Name 		ol_txrx_warn("pop failed");
1624*5113495bSYour Name 		return;
1625*5113495bSYour Name 	}
1626*5113495bSYour Name 
1627*5113495bSYour Name 	/*
1628*5113495bSYour Name 	 * Replenish the rx buffer ring first to provide buffers to the target
1629*5113495bSYour Name 	 * rather than waiting for the indeterminate time taken by the OS
1630*5113495bSYour Name 	 * to consume the rx frames
1631*5113495bSYour Name 	 */
1632*5113495bSYour Name 	filled = htt_rx_msdu_buff_in_order_replenish(htt_pdev, msdu_count);
1633*5113495bSYour Name 	ol_rx_ind_record_event(filled, OL_RX_INDICATION_BUF_REPLENISH);
1634*5113495bSYour Name 
1635*5113495bSYour Name 	if (!head_msdu) {
1636*5113495bSYour Name 		ol_txrx_dbg("No packet to send to HDD");
1637*5113495bSYour Name 		return;
1638*5113495bSYour Name 	}
1639*5113495bSYour Name 
1640*5113495bSYour Name 	/* Send the chain of MSDUs to the OS */
1641*5113495bSYour Name 	/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
1642*5113495bSYour Name 	qdf_nbuf_set_next(tail_msdu, NULL);
1643*5113495bSYour Name 
1644*5113495bSYour Name 	/* Packet Capture Mode */
1645*5113495bSYour Name 
1646*5113495bSYour Name 	if ((ucfg_pkt_capture_get_pktcap_mode((void *)soc->psoc) &
1647*5113495bSYour Name 	      PKT_CAPTURE_MODE_DATA_ONLY)) {
1648*5113495bSYour Name 		offloaded_pkt = ucfg_pkt_capture_rx_offloaded_pkt(rx_ind_msg);
1649*5113495bSYour Name 		if (peer) {
1650*5113495bSYour Name 			vdev = peer->vdev;
1651*5113495bSYour Name 			if (peer->vdev) {
1652*5113495bSYour Name 				qdf_spin_lock_bh(&pdev->peer_ref_mutex);
1653*5113495bSYour Name 				peer_head = TAILQ_FIRST(&vdev->peer_list);
1654*5113495bSYour Name 				qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
1655*5113495bSYour Name 				if (peer_head) {
1656*5113495bSYour Name 					qdf_spin_lock_bh(
1657*5113495bSYour Name 						&peer_head->peer_info_lock);
1658*5113495bSYour Name 					qdf_mem_copy(bssid,
1659*5113495bSYour Name 						     &peer_head->mac_addr.raw,
1660*5113495bSYour Name 						     QDF_MAC_ADDR_SIZE);
1661*5113495bSYour Name 					qdf_spin_unlock_bh(
1662*5113495bSYour Name 						&peer_head->peer_info_lock);
1663*5113495bSYour Name 
1664*5113495bSYour Name 					ucfg_pkt_capture_rx_msdu_process(
1665*5113495bSYour Name 							bssid, head_msdu,
1666*5113495bSYour Name 							peer->vdev->vdev_id,
1667*5113495bSYour Name 							htt_pdev);
1668*5113495bSYour Name 				}
1669*5113495bSYour Name 			}
1670*5113495bSYour Name 		} else if (offloaded_pkt) {
1671*5113495bSYour Name 			ucfg_pkt_capture_rx_msdu_process(
1672*5113495bSYour Name 						bssid, head_msdu,
1673*5113495bSYour Name 						HTT_INVALID_VDEV,
1674*5113495bSYour Name 						htt_pdev);
1675*5113495bSYour Name 
1676*5113495bSYour Name 			ucfg_pkt_capture_rx_drop_offload_pkt(head_msdu);
1677*5113495bSYour Name 			return;
1678*5113495bSYour Name 		}
1679*5113495bSYour Name 	}
1680*5113495bSYour Name 
1681*5113495bSYour Name 	/* Pktlog */
1682*5113495bSYour Name 	ol_rx_send_pktlog_event(pdev, peer, head_msdu, pktlog_bit);
1683*5113495bSYour Name 
1684*5113495bSYour Name 	/*
1685*5113495bSYour Name 	 * if this is an offload indication, peer id is carried in the
1686*5113495bSYour Name 	 * rx buffer
1687*5113495bSYour Name 	 */
1688*5113495bSYour Name 	if (peer) {
1689*5113495bSYour Name 		vdev = peer->vdev;
1690*5113495bSYour Name 	} else {
1691*5113495bSYour Name 		ol_txrx_dbg("Couldn't find peer from ID 0x%x", peer_id);
1692*5113495bSYour Name 		while (head_msdu) {
1693*5113495bSYour Name 			qdf_nbuf_t msdu = head_msdu;
1694*5113495bSYour Name 
1695*5113495bSYour Name 			head_msdu = qdf_nbuf_next(head_msdu);
1696*5113495bSYour Name 			TXRX_STATS_MSDU_INCR(pdev,
1697*5113495bSYour Name 				 rx.dropped_peer_invalid, msdu);
1698*5113495bSYour Name 			htt_rx_desc_frame_free(htt_pdev, msdu);
1699*5113495bSYour Name 		}
1700*5113495bSYour Name 		return;
1701*5113495bSYour Name 	}
1702*5113495bSYour Name 
1703*5113495bSYour Name 	/*Loop msdu to fill tstamp with tsf64 time in ol_rx_timestamp*/
1704*5113495bSYour Name 	ol_rx_timestamp_update(pdev, head_msdu, tail_msdu);
1705*5113495bSYour Name 
1706*5113495bSYour Name 	peer->rx_opt_proc(vdev, peer, tid, head_msdu);
1707*5113495bSYour Name }
1708*5113495bSYour Name #endif
1709*5113495bSYour Name 
1710*5113495bSYour Name #ifdef CONNECTIVITY_PKTLOG
1711*5113495bSYour Name /**
1712*5113495bSYour Name  * ol_rx_pkt_dump_call() - updates status and
1713*5113495bSYour Name  * calls packetdump callback to log rx packet
1714*5113495bSYour Name  *
1715*5113495bSYour Name  * @msdu: rx packet
1716*5113495bSYour Name  * @peer_id: peer id
1717*5113495bSYour Name  * @status: status of rx packet
1718*5113495bSYour Name  *
1719*5113495bSYour Name  * This function is used to update the status of rx packet
1720*5113495bSYour Name  * and then calls packetdump callback to log that packet.
1721*5113495bSYour Name  *
1722*5113495bSYour Name  * Return: None
1723*5113495bSYour Name  *
1724*5113495bSYour Name  */
ol_rx_pkt_dump_call(qdf_nbuf_t msdu,uint8_t peer_id,uint8_t status)1725*5113495bSYour Name void ol_rx_pkt_dump_call(
1726*5113495bSYour Name 	qdf_nbuf_t msdu,
1727*5113495bSYour Name 	uint8_t peer_id,
1728*5113495bSYour Name 	uint8_t status)
1729*5113495bSYour Name {
1730*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
1731*5113495bSYour Name 	ol_txrx_soc_handle soc_hdl = ol_txrx_soc_t_to_cdp_soc_t(soc);
1732*5113495bSYour Name 	struct ol_txrx_peer_t *peer = NULL;
1733*5113495bSYour Name 	ol_txrx_pktdump_cb packetdump_cb;
1734*5113495bSYour Name 	ol_txrx_pdev_handle pdev;
1735*5113495bSYour Name 
1736*5113495bSYour Name 	if (qdf_unlikely(!soc))
1737*5113495bSYour Name 		return;
1738*5113495bSYour Name 
1739*5113495bSYour Name 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
1740*5113495bSYour Name 	if (!pdev) {
1741*5113495bSYour Name 		ol_txrx_err("pdev is NULL");
1742*5113495bSYour Name 		return;
1743*5113495bSYour Name 	}
1744*5113495bSYour Name 
1745*5113495bSYour Name 	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
1746*5113495bSYour Name 	if (!peer) {
1747*5113495bSYour Name 		ol_txrx_dbg("peer with peer id %d is NULL", peer_id);
1748*5113495bSYour Name 		return;
1749*5113495bSYour Name 	}
1750*5113495bSYour Name 
1751*5113495bSYour Name 	packetdump_cb = pdev->ol_rx_packetdump_cb;
1752*5113495bSYour Name 	if (packetdump_cb &&
1753*5113495bSYour Name 	    wlan_op_mode_sta == peer->vdev->opmode)
1754*5113495bSYour Name 		packetdump_cb(soc_hdl, OL_TXRX_PDEV_ID, peer->vdev->vdev_id,
1755*5113495bSYour Name 			      msdu, status, QDF_RX_DATA_PKT);
1756*5113495bSYour Name }
1757*5113495bSYour Name #endif
1758*5113495bSYour Name 
#ifdef WLAN_FULL_REORDER_OFFLOAD
/**
 * ol_rx_in_order_deliver() - deliver an in-order list of rx MSDUs
 * @vdev: virtual device on which the MSDUs were received
 * @peer: peer that sent the MSDUs
 * @tid: traffic ID of the MSDUs (not referenced in this path)
 * @msdu_list: chain of rx frames; must be NULL-terminated
 *
 * Walks the list once to record per-MSDU DP traces and statistics,
 * then hands the entire (still intact) list to ol_rx_data_process()
 * for delivery toward the OS stack.
 */
void
ol_rx_in_order_deliver(struct ol_txrx_vdev_t *vdev,
		       struct ol_txrx_peer_t *peer,
		       unsigned int tid, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu;

	msdu = msdu_list;
	/*
	 * Currently, this does not check each MSDU to see whether it requires
	 * special handling. MSDUs that need special handling (example: IGMP
	 * frames) should be sent via a separate HTT message. Also, this does
	 * not do rx->tx forwarding or filtering.
	 */

	while (msdu) {
		/* Cache the link before stats macros, which may be no-ops */
		qdf_nbuf_t next = qdf_nbuf_next(msdu);

		DPTRACE(qdf_dp_trace(msdu,
			QDF_DP_TRACE_RX_TXRX_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			qdf_nbuf_data_addr(msdu),
			sizeof(qdf_nbuf_data(msdu)), QDF_RX));

		/*
		 * NOTE(review): the macros below reference rx_desc, which is
		 * not declared in this function — they presumably expand to
		 * nothing in this build configuration; verify the macro
		 * definitions before enabling the related debug levels.
		 */
		OL_RX_PEER_STATS_UPDATE(peer, msdu);
		OL_RX_ERR_STATISTICS_1(vdev->pdev, vdev, peer, rx_desc,
				       OL_RX_ERR_NONE);
		TXRX_STATS_MSDU_INCR(vdev->pdev, rx.delivered, msdu);

		msdu = next;
	}

	/*
	 * NOTE(review): pdev/deliver_list_head are likewise not declared
	 * here; this dump call only compiles when ol_txrx_frms_dump() is a
	 * discard-args macro — confirm before raising TXRX debug level.
	 */
	ol_txrx_frms_dump("rx delivering:",
			  pdev, deliver_list_head,
			  ol_txrx_frm_dump_tcp_seq | ol_txrx_frm_dump_contents,
			  0 /* don't print contents */);

	ol_rx_data_process(peer, msdu_list);
}
#endif
1801*5113495bSYour Name 
1802*5113495bSYour Name #ifndef CONFIG_HL_SUPPORT
1803*5113495bSYour Name void
ol_rx_offload_paddr_deliver_ind_handler(htt_pdev_handle htt_pdev,uint32_t msdu_count,uint32_t * msg_word)1804*5113495bSYour Name ol_rx_offload_paddr_deliver_ind_handler(htt_pdev_handle htt_pdev,
1805*5113495bSYour Name 					uint32_t msdu_count,
1806*5113495bSYour Name 					uint32_t *msg_word)
1807*5113495bSYour Name {
1808*5113495bSYour Name 	int vdev_id, peer_id, tid;
1809*5113495bSYour Name 	qdf_nbuf_t head_buf, tail_buf, buf;
1810*5113495bSYour Name 	struct ol_txrx_peer_t *peer;
1811*5113495bSYour Name 	uint8_t fw_desc;
1812*5113495bSYour Name 	int msdu_iter = 0;
1813*5113495bSYour Name 
1814*5113495bSYour Name 	while (msdu_count) {
1815*5113495bSYour Name 		if (htt_rx_offload_paddr_msdu_pop_ll(
1816*5113495bSYour Name 						htt_pdev, msg_word, msdu_iter,
1817*5113495bSYour Name 						 &vdev_id, &peer_id, &tid,
1818*5113495bSYour Name 						 &fw_desc, &head_buf,
1819*5113495bSYour Name 						 &tail_buf)) {
1820*5113495bSYour Name 			msdu_iter++;
1821*5113495bSYour Name 			msdu_count--;
1822*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1823*5113495bSYour Name 				  "skip msg_word %pK, msdu #%d, continue next",
1824*5113495bSYour Name 				  msg_word, msdu_iter);
1825*5113495bSYour Name 			continue;
1826*5113495bSYour Name 		}
1827*5113495bSYour Name 
1828*5113495bSYour Name 		peer = ol_txrx_peer_find_by_id(htt_pdev->txrx_pdev, peer_id);
1829*5113495bSYour Name 		if (peer) {
1830*5113495bSYour Name 			QDF_NBUF_CB_DP_TRACE_PRINT(head_buf) = false;
1831*5113495bSYour Name 			qdf_dp_trace_set_track(head_buf, QDF_RX);
1832*5113495bSYour Name 			QDF_NBUF_CB_TX_PACKET_TRACK(head_buf) =
1833*5113495bSYour Name 						QDF_NBUF_TX_PKT_DATA_TRACK;
1834*5113495bSYour Name 			qdf_dp_trace_log_pkt(peer->vdev->vdev_id,
1835*5113495bSYour Name 				head_buf, QDF_RX,
1836*5113495bSYour Name 				QDF_TRACE_DEFAULT_PDEV_ID,
1837*5113495bSYour Name 				peer->vdev->qdf_opmode);
1838*5113495bSYour Name 			DPTRACE(qdf_dp_trace(head_buf,
1839*5113495bSYour Name 				QDF_DP_TRACE_RX_OFFLOAD_HTT_PACKET_PTR_RECORD,
1840*5113495bSYour Name 				QDF_TRACE_DEFAULT_PDEV_ID,
1841*5113495bSYour Name 				qdf_nbuf_data_addr(head_buf),
1842*5113495bSYour Name 				sizeof(qdf_nbuf_data(head_buf)), QDF_RX));
1843*5113495bSYour Name 			ol_rx_data_process(peer, head_buf);
1844*5113495bSYour Name 		} else {
1845*5113495bSYour Name 			buf = head_buf;
1846*5113495bSYour Name 			while (1) {
1847*5113495bSYour Name 				qdf_nbuf_t next;
1848*5113495bSYour Name 
1849*5113495bSYour Name 				next = qdf_nbuf_next(buf);
1850*5113495bSYour Name 				htt_rx_desc_frame_free(htt_pdev, buf);
1851*5113495bSYour Name 				if (buf == tail_buf)
1852*5113495bSYour Name 					break;
1853*5113495bSYour Name 				buf = next;
1854*5113495bSYour Name 			}
1855*5113495bSYour Name 		}
1856*5113495bSYour Name 		msdu_iter++;
1857*5113495bSYour Name 		msdu_count--;
1858*5113495bSYour Name 	}
1859*5113495bSYour Name 	htt_rx_msdu_buff_replenish(htt_pdev);
1860*5113495bSYour Name }
1861*5113495bSYour Name #endif
1862*5113495bSYour Name 
#ifdef FEATURE_MONITOR_MODE_SUPPORT
/**
 * ol_htt_mon_note_chan() - Update monitor channel information
 * @ppdev: handle to the physical device
 * @mon_ch: Monitor channel
 *
 * Return: None
 */
void ol_htt_mon_note_chan(struct cdp_pdev *ppdev, int mon_ch)
{
	/* cdp handle is the ol txrx pdev in this datapath */
	htt_rx_mon_note_capture_channel(
		((struct ol_txrx_pdev_t *)ppdev)->htt_pdev, mon_ch);
}
#endif
1878*5113495bSYour Name 
#ifdef NEVERDEFINED
/**
 * @brief populates vow ext stats in given network buffer.
 * @param msdu - network buffer handle
 * @param pdev - handle to htt dev.
 *
 * Stamps VoW extended rx statistics directly into the UDP payload of a
 * matching frame (the UDP checksum is zeroed so the stamped bytes do not
 * invalidate it). The byte order of the *bp++ writes below defines the
 * on-the-wire layout consumed by the peer tool — do not reorder.
 *
 * NOTE(review): this block is compiled out (NEVERDEFINED) and the magic
 * bytes 0x12 0x34 0x00 0x08 presumably identify an iperf-style test
 * payload — confirm against the consumer before re-enabling.
 */
void ol_ath_add_vow_extstats(htt_pdev_handle pdev, qdf_nbuf_t msdu)
{
	/* FIX THIS:
	 * txrx should not be directly using data types (scn)
	 * that are internal to other modules.
	 */
	struct ol_ath_softc_net80211 *scn =
		(struct ol_ath_softc_net80211 *)pdev->ctrl_pdev;
	uint8_t *data, *l3_hdr, *bp;
	uint16_t ethertype;
	int offset;
	struct vow_extstats vowstats;

	/* feature disabled: leave the payload untouched */
	if (scn->vow_extstats == 0)
		return;

	data = qdf_nbuf_data(msdu);

	/* ethertype sits right after the two MAC addresses */
	offset = QDF_MAC_ADDR_SIZE * 2;
	l3_hdr = data + ETHERNET_HDR_LEN;
	ethertype = (data[offset] << 8) | data[offset + 1];
	if (ethertype == ETHERTYPE_IPV4) {
		offset = IPV4_HDR_OFFSET_PROTOCOL;
		/* only plain IPv4 (no options) carrying UDP qualifies */
		if ((l3_hdr[offset] == IP_PROTOCOL_UDP) &&
				(l3_hdr[0] == IP_VER4_N_NO_EXTRA_HEADERS)) {
			bp = data + EXT_HDR_OFFSET;

			/* match the RTP-ext marker and payload magic */
			if ((data[RTP_HDR_OFFSET] == UDP_PDU_RTP_EXT) &&
					(bp[0] == 0x12) &&
					(bp[1] == 0x34) &&
					(bp[2] == 0x00) && (bp[3] == 0x08)) {
				/*
				 * Clear UDP checksum so we do not have
				 * to recalculate it
				 * after filling in status fields.
				 */
				data[UDP_CKSUM_OFFSET] = 0;
				data[(UDP_CKSUM_OFFSET + 1)] = 0;

				bp += IPERF3_DATA_OFFSET;

				htt_rx_get_vowext_stats(msdu,
						&vowstats);

				/* control channel RSSI */
				*bp++ = vowstats.rx_rssi_ctl0;
				*bp++ = vowstats.rx_rssi_ctl1;
				*bp++ = vowstats.rx_rssi_ctl2;

				/* rx rate info */
				*bp++ = vowstats.rx_bw;
				*bp++ = vowstats.rx_sgi;
				*bp++ = vowstats.rx_nss;

				*bp++ = vowstats.rx_rssi_comb;
				/* rsflags */
				*bp++ = vowstats.rx_rs_flags;

				/* Time stamp Lo (big-endian, low 16 bits) */
				*bp++ = (uint8_t)
					((vowstats.
					  rx_macTs & 0x0000ff00) >> 8);
				*bp++ = (uint8_t)
					(vowstats.rx_macTs & 0x0000ff);
				/* rx phy errors (big-endian 16-bit) */
				*bp++ = (uint8_t)
					((scn->chan_stats.
					  phy_err_cnt >> 8) & 0xff);
				*bp++ =
					(uint8_t) (scn->chan_stats.
							phy_err_cnt & 0xff);
				/* rx clear count (big-endian 32-bit) */
				*bp++ = (uint8_t)
					((scn->mib_cycle_cnts.
					  rx_clear_count >> 24) & 0xff);
				*bp++ = (uint8_t)
					((scn->mib_cycle_cnts.
					  rx_clear_count >> 16) & 0xff);
				*bp++ = (uint8_t)
					((scn->mib_cycle_cnts.
					  rx_clear_count >> 8) & 0xff);
				*bp++ = (uint8_t)
					(scn->mib_cycle_cnts.
					 rx_clear_count & 0xff);
				/* rx cycle count (big-endian 32-bit) */
				*bp++ = (uint8_t)
					((scn->mib_cycle_cnts.
					  cycle_count >> 24) & 0xff);
				*bp++ = (uint8_t)
					((scn->mib_cycle_cnts.
					  cycle_count >> 16) & 0xff);
				*bp++ = (uint8_t)
					((scn->mib_cycle_cnts.
					  cycle_count >> 8) & 0xff);
				*bp++ = (uint8_t)
					(scn->mib_cycle_cnts.
					 cycle_count & 0xff);

				*bp++ = vowstats.rx_ratecode;
				*bp++ = vowstats.rx_moreaggr;

				/* sequence number (big-endian 16-bit) */
				*bp++ = (uint8_t)
					((vowstats.rx_seqno >> 8) &
					 0xff);
				*bp++ = (uint8_t)
					(vowstats.rx_seqno & 0xff);
			}
		}
	}
}

#endif
1998*5113495bSYour Name 
#ifdef WLAN_CFR_ENABLE
/**
 * ol_rx_cfr_capture_msg_handler() - handle HTT CFR capture indication
 * @htt_t2h_msg: HTT target-to-host message carrying the CFR dump
 *               completion indication
 *
 * Parses a HTT peer CFR capture message (only type 1 is supported),
 * validates the request id, fills a struct csi_cfr_header from the
 * indication fields and forwards it, together with the host memory
 * index of the dump, to the CFR component via ucfg_cfr_capture_data().
 *
 * Return: None
 */
void ol_rx_cfr_capture_msg_handler(qdf_nbuf_t htt_t2h_msg)
{
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	HTT_PEER_CFR_CAPTURE_MSG_TYPE cfr_type;
	struct htt_cfr_dump_compl_ind *cfr_dump;
	struct htt_cfr_dump_ind_type_1 cfr_ind;
	struct csi_cfr_header cfr_hdr = {};
	uint32_t mem_index, req_id, vdev_id;
	uint32_t *msg_word;
	uint8_t *mac_addr;

	/* cds_get_context() returns NULL if the SOC context is not
	 * registered (e.g. during driver load/unload); bail out instead
	 * of dereferencing it below.
	 */
	if (!soc) {
		ol_txrx_err("soc is NULL");
		return;
	}

	msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);

	/* First payload word */
	msg_word++;
	cfr_dump = (struct htt_cfr_dump_compl_ind *)msg_word;
	cfr_type = cfr_dump->msg_type;
	if (cfr_type != HTT_PEER_CFR_CAPTURE_MSG_TYPE_1) {
		ol_txrx_err("Unsupported cfr msg type 0x%x", cfr_type);
		return;
	}

	/* Second payload word */
	msg_word++;
	req_id = HTT_T2H_CFR_DUMP_TYPE1_MEM_REQ_ID_GET(*msg_word);
	if (req_id != CFR_CAPTURE_HOST_MEM_REQ_ID) {
		ol_txrx_err("Invalid req id in cfr capture msg");
		return;
	}
	cfr_hdr.start_magic_num = 0xDEADBEAF;
	cfr_hdr.u.meta_v1.status = HTT_T2H_CFR_DUMP_TYPE1_STATUS_GET(
					*msg_word);
	cfr_hdr.u.meta_v1.capture_bw = HTT_T2H_CFR_DUMP_TYPE1_CAP_BW_GET(
					*msg_word);
	cfr_hdr.u.meta_v1.capture_mode = HTT_T2H_CFR_DUMP_TYPE1_MODE_GET(
					*msg_word);
	cfr_hdr.u.meta_v1.sts_count = HTT_T2H_CFR_DUMP_TYPE1_STS_GET(
					*msg_word);
	cfr_hdr.u.meta_v1.channel_bw = HTT_T2H_CFR_DUMP_TYPE1_CHAN_BW_GET(
					*msg_word);
	cfr_hdr.u.meta_v1.capture_type = HTT_T2H_CFR_DUMP_TYPE1_CAP_TYPE_GET(
					*msg_word);

	vdev_id = HTT_T2H_CFR_DUMP_TYPE1_VDEV_ID_GET(*msg_word);

	/* Peer MAC address follows the second payload word */
	mac_addr = (uint8_t *)(msg_word + 1);
	qdf_mem_copy(cfr_hdr.u.meta_v1.peer_addr, mac_addr, QDF_MAC_ADDR_SIZE);

	cfr_ind = cfr_dump->htt_cfr_dump_compl_ind_type_1;

	cfr_hdr.u.meta_v1.prim20_chan = cfr_ind.chan.chan_mhz;
	cfr_hdr.u.meta_v1.center_freq1 = cfr_ind.chan.band_center_freq1;
	cfr_hdr.u.meta_v1.center_freq2 = cfr_ind.chan.band_center_freq2;
	cfr_hdr.u.meta_v1.phy_mode = cfr_ind.chan.chan_mode;
	cfr_hdr.u.meta_v1.length = cfr_ind.length;
	cfr_hdr.u.meta_v1.timestamp = cfr_ind.timestamp;

	mem_index = cfr_ind.index;

	ucfg_cfr_capture_data((void *)soc->psoc, vdev_id, &cfr_hdr, mem_index);
}
#endif
2062