/*
 * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_nbuf.h>               /* qdf_nbuf_t, etc. */
#include <qdf_util.h>               /* qdf_cpu_to_le64 */
#include <qdf_types.h>              /* bool */
#include <cds_ieee80211_common.h>   /* ieee80211_frame */

/* external API header files */
#include <ol_ctrl_txrx_api.h>       /* ol_rx_notify */
#include <ol_txrx_api.h>            /* ol_txrx_pdev_handle */
#include <ol_txrx_htt_api.h>        /* ol_rx_indication_handler */
#include <ol_htt_rx_api.h>          /* htt_rx_peer_id, etc. */

/* internal API header files */
#include <ol_txrx_peer_find.h>      /* ol_txrx_peer_find_by_id */
#include <ol_rx_reorder.h>          /* ol_rx_reorder_store, etc. */
#include <ol_rx_reorder_timeout.h>  /* OL_RX_REORDER_TIMEOUT_UPDATE */
#include <ol_rx_defrag.h>           /* ol_rx_defrag_waitlist_flush */
#include <ol_txrx_internal.h>
#include <ol_txrx.h>
#include <wdi_event.h>
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#include <ol_txrx_encap.h>          /* ol_rx_decap_info_t, etc */
#endif
#include <ol_rx.h>

/* FIX THIS: txrx should not include private header files of other modules */
#include <htt_types.h>
#include <ol_if_athvar.h>
#include <enet.h>                   /* ethernet + SNAP/LLC header defs and
                                     * ethertype values
                                     */
#include <ip_prot.h>                /* IP protocol values */
#include <ipv4.h>                   /* IPv4 header defs */
#include <ipv6_defs.h>              /* IPv6 header defs */
#include <ol_vowext_dbg_defs.h>
#include <wma.h>
#include <wlan_policy_mgr_api.h>
#include "pktlog_ac_fmt.h"
#include <cdp_txrx_handle.h>
#include <pld_common.h>
#include <htt_internal.h>
#include <wlan_pkt_capture_ucfg_api.h>
#include <wlan_cfr_ucfg_api.h>

#ifndef OL_RX_INDICATION_MAX_RECORDS
#define OL_RX_INDICATION_MAX_RECORDS 2048
#endif

/**
 * enum ol_rx_ind_record_type - OL rx indication events
 * @OL_RX_INDICATION_POP_START: event recorded before netbuf pop
 * @OL_RX_INDICATION_POP_END: event recorded after netbuf pop
 * @OL_RX_INDICATION_BUF_REPLENISH: event recorded after buffer replenishment
 */
enum ol_rx_ind_record_type {
	OL_RX_INDICATION_POP_START,
	OL_RX_INDICATION_POP_END,
	OL_RX_INDICATION_BUF_REPLENISH,
};

/**
 * struct ol_rx_ind_record - structure for detailing ol txrx rx ind. event
 * @value: info corresponding to rx indication event
 * @type: what the event was
 * @time: when it happened
 */
struct ol_rx_ind_record {
	uint16_t value;
	enum ol_rx_ind_record_type type;
	uint64_t time;
};

#ifdef OL_RX_INDICATION_RECORD
static uint32_t ol_rx_ind_record_index;
struct ol_rx_ind_record
	ol_rx_indication_record_history[OL_RX_INDICATION_MAX_RECORDS];

/**
 * ol_rx_ind_record_event() - record ol rx indication events
 * @value: contains rx ind. event related info
 * @type: ol rx indication message type
 *
 * This API records the ol rx indication event in a rx indication
 * record buffer.
 *
 * Return: None
 */
static void ol_rx_ind_record_event(uint32_t value,
				   enum ol_rx_ind_record_type type)
{
	ol_rx_indication_record_history[ol_rx_ind_record_index].value = value;
	ol_rx_indication_record_history[ol_rx_ind_record_index].type = type;
	ol_rx_indication_record_history[ol_rx_ind_record_index].time =
			qdf_get_log_timestamp();

	ol_rx_ind_record_index++;
	if (ol_rx_ind_record_index >= OL_RX_INDICATION_MAX_RECORDS)
		ol_rx_ind_record_index = 0;
}
#else
static inline
void ol_rx_ind_record_event(uint32_t value, enum ol_rx_ind_record_type type)
{
}

#endif /* OL_RX_INDICATION_RECORD */

void ol_rx_data_process(struct ol_txrx_peer_t *peer,
			qdf_nbuf_t rx_buf_list);

#ifdef WDI_EVENT_ENABLE
/**
 * ol_rx_send_pktlog_event() - send rx packetlog event
 * @pdev: pdev handle
 * @peer: peer handle
 * @msdu: skb list
 * @pktlog_bit: packetlog bit from firmware
 *
 * Return: none
 */
#ifdef HELIUMPLUS
void ol_rx_send_pktlog_event(struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_peer_t *peer, qdf_nbuf_t msdu, uint8_t pktlog_bit)
{
	struct ol_rx_remote_data data;

	/*
	 * pktlog is meant to log rx_desc information, which is
	 * already overwritten by the radio header when monitor mode is on.
	 * Therefore, do not log the pktlog event when monitor mode is on.
	 */
	if (!pktlog_bit || (cds_get_conparam() == QDF_GLOBAL_MONITOR_MODE))
		return;

	data.msdu = msdu;
	if (peer)
		data.mac_id = peer->vdev->mac_id;
	else
		data.mac_id = 0;

	wdi_event_handler(WDI_EVENT_RX_DESC_REMOTE, pdev->id,
			  &data);
}
#else
void ol_rx_send_pktlog_event(struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_peer_t *peer, qdf_nbuf_t msdu, uint8_t pktlog_bit)
{
	struct ol_rx_remote_data data;

	/*
	 * pktlog is meant to log rx_desc information, which is
	 * already overwritten by the radio header when monitor mode is on.
	 * Therefore, do not log the pktlog event when monitor mode is on.
	 */
	if (cds_get_conparam() == QDF_GLOBAL_MONITOR_MODE)
		return;

	data.msdu = msdu;
	if (peer)
		data.mac_id = peer->vdev->mac_id;
	else
		data.mac_id = 0;

	wdi_event_handler(WDI_EVENT_RX_DESC_REMOTE, pdev->id,
			  &data);
}
#endif
#endif /* WDI_EVENT_ENABLE */

#ifdef HTT_RX_RESTORE

static void ol_rx_restore_handler(struct work_struct *htt_rx)
{
	qdf_device_t qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "Enter: %s", __func__);
	pld_device_self_recovery(qdf_ctx->dev);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "Exit: %s", __func__);
}

static DECLARE_WORK(ol_rx_restore_work, ol_rx_restore_handler);

void ol_rx_trigger_restore(htt_pdev_handle htt_pdev, qdf_nbuf_t head_msdu,
			   qdf_nbuf_t tail_msdu)
{
	qdf_nbuf_t next;

	while (head_msdu) {
		next = qdf_nbuf_next(head_msdu);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "freeing %pK\n", head_msdu);
		qdf_nbuf_free(head_msdu);
		head_msdu = next;
	}

	if (!htt_pdev->rx_ring.htt_rx_restore) {
		cds_set_recovery_in_progress(true);
		htt_pdev->rx_ring.htt_rx_restore = 1;
		schedule_work(&ol_rx_restore_work);
	}
}
#endif

/**
 * ol_rx_update_histogram_stats() - update rx histogram statistics
 * @msdu_count: msdu count
 * @frag_ind: fragment indication set
 * @offload_ind: offload indication set
 *
 * Return: none
 */
void ol_rx_update_histogram_stats(uint32_t msdu_count, uint8_t frag_ind,
				  uint8_t offload_ind)
{
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	ol_txrx_pdev_handle pdev;

	if (qdf_unlikely(!soc))
		return;

	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		return;
	}

	if (msdu_count > 60) {
		TXRX_STATS_ADD(pdev, pub.rx.rx_ind_histogram.pkts_61_plus, 1);
	} else if (msdu_count > 50) {
		TXRX_STATS_ADD(pdev, pub.rx.rx_ind_histogram.pkts_51_60, 1);
	} else if (msdu_count > 40) {
		TXRX_STATS_ADD(pdev, pub.rx.rx_ind_histogram.pkts_41_50, 1);
	} else if (msdu_count > 30) {
		TXRX_STATS_ADD(pdev, pub.rx.rx_ind_histogram.pkts_31_40, 1);
	} else if (msdu_count > 20) {
		TXRX_STATS_ADD(pdev, pub.rx.rx_ind_histogram.pkts_21_30, 1);
	} else if (msdu_count > 10) {
		TXRX_STATS_ADD(pdev, pub.rx.rx_ind_histogram.pkts_11_20, 1);
	} else if (msdu_count > 1) {
		TXRX_STATS_ADD(pdev, pub.rx.rx_ind_histogram.pkts_2_10, 1);
	} else if (msdu_count == 1) {
		TXRX_STATS_ADD(pdev, pub.rx.rx_ind_histogram.pkts_1, 1);
	}

	if (frag_ind)
		TXRX_STATS_ADD(pdev, pub.rx.msdus_with_frag_ind, msdu_count);

	if (offload_ind)
		TXRX_STATS_ADD(pdev, pub.rx.msdus_with_offload_ind, msdu_count);

}

#ifdef WLAN_PARTIAL_REORDER_OFFLOAD

#ifdef WDI_EVENT_ENABLE
static void ol_rx_process_inv_peer(ol_txrx_pdev_handle pdev,
				   void *rx_mpdu_desc, qdf_nbuf_t msdu)
{
	uint8_t a1[QDF_MAC_ADDR_SIZE];
	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	struct ol_txrx_vdev_t *vdev = NULL;
	struct ieee80211_frame *wh;
	struct wdi_event_rx_peer_invalid_msg msg;

	wh = (struct ieee80211_frame *)
	     htt_rx_mpdu_wifi_hdr_retrieve(htt_pdev, rx_mpdu_desc);
	/*
	 * Klocwork issue #6152
	 * All targets that send an "INVALID_PEER" rx status provide an
	 * 802.11 header for each rx MPDU, so it is certain that
	 * htt_rx_mpdu_wifi_hdr_retrieve will succeed.
	 * However, both for robustness, e.g. if this function is given an
	 * MSDU descriptor rather than an MPDU descriptor, and to make it
	 * clear to static analysis that this code is safe, add an explicit
	 * check that htt_rx_mpdu_wifi_hdr_retrieve provides a non-NULL value.
	 */
	if (!wh || !IEEE80211_IS_DATA(wh))
		return;

	/* ignore frames for non-existent bssids */
	qdf_mem_copy(a1, wh->i_addr1, QDF_MAC_ADDR_SIZE);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		if (qdf_mem_cmp(a1, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE))
			break;
	}
	if (!vdev)
		return;

	msg.wh = wh;
	msg.msdu = msdu;
	msg.vdev_id = vdev->vdev_id;
	wdi_event_handler(WDI_EVENT_RX_PEER_INVALID, pdev->id,
			  &msg);
}
#else
static inline
void ol_rx_process_inv_peer(ol_txrx_pdev_handle pdev,
			    void *rx_mpdu_desc, qdf_nbuf_t msdu)
{
}
#endif

#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
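/*
 * ol_rx_rssi_avg() - exponentially weighted moving average of rx RSSI.
 * The new sample contributes rssi_new_weight parts out of
 * (1 << rssi_update_shift) total:
 *   avg = (rssi_new * w_new + rssi_old * ((1 << shift) - w_new)) >> shift
 * For example, with rssi_new_weight = 1 and rssi_update_shift = 3
 * (hypothetical values), each new sample contributes 1/8 of the average.
 * HTT_RSSI_INVALID samples are ignored.
 */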
static inline int16_t
ol_rx_rssi_avg(struct ol_txrx_pdev_t *pdev, int16_t rssi_old, int16_t rssi_new)
{
	int rssi_old_weight;

	if (rssi_new == HTT_RSSI_INVALID)
		return rssi_old;
	if (rssi_old == HTT_RSSI_INVALID)
		return rssi_new;

	rssi_old_weight =
		(1 << pdev->rssi_update_shift) - pdev->rssi_new_weight;
	return (rssi_new * pdev->rssi_new_weight +
		rssi_old * rssi_old_weight) >> pdev->rssi_update_shift;
}

static void
ol_rx_ind_rssi_update(struct ol_txrx_peer_t *peer, qdf_nbuf_t rx_ind_msg)
{
	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;

	peer->rssi_dbm = ol_rx_rssi_avg(pdev, peer->rssi_dbm,
					htt_rx_ind_rssi_dbm(pdev->htt_pdev,
							    rx_ind_msg));
}

static void
ol_rx_mpdu_rssi_update(struct ol_txrx_peer_t *peer, void *rx_mpdu_desc)
{
	struct ol_txrx_pdev_t *pdev;

	/* check the peer before dereferencing it for the pdev */
	if (!peer)
		return;
	pdev = peer->vdev->pdev;
	peer->rssi_dbm = ol_rx_rssi_avg(pdev, peer->rssi_dbm,
					htt_rx_mpdu_desc_rssi_dbm(
						pdev->htt_pdev,
						rx_mpdu_desc));
}

#else
#define ol_rx_ind_rssi_update(peer, rx_ind_msg) /* no-op */
#define ol_rx_mpdu_rssi_update(peer, rx_mpdu_desc) /* no-op */
#endif /* QCA_SUPPORT_PEER_DATA_RX_RSSI */

static void discard_msdus(htt_pdev_handle htt_pdev,
			  qdf_nbuf_t head_msdu,
			  qdf_nbuf_t tail_msdu)
{
	while (1) {
		qdf_nbuf_t next;

		next = qdf_nbuf_next(head_msdu);
		htt_rx_desc_frame_free(htt_pdev, head_msdu);
		if (head_msdu == tail_msdu)
			break;
		head_msdu = next;
	}
}

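/*
 * chain_msdus() - dispose of the MSDUs of a chained (multi-buffer) A-MSDU.
 * Delivering a chained A-MSDU would require stitching the scattered
 * buffers into a single buffer (see the TBDXXX note in
 * ol_rx_indication_handler), so for now the frames are simply freed,
 * just as in discard_msdus().
 */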
static void chain_msdus(htt_pdev_handle htt_pdev,
			qdf_nbuf_t head_msdu,
			qdf_nbuf_t tail_msdu)
{
	while (1) {
		qdf_nbuf_t next;

		next = qdf_nbuf_next(head_msdu);
		htt_rx_desc_frame_free(htt_pdev, head_msdu);
		if (head_msdu == tail_msdu)
			break;
		head_msdu = next;
	}
}

static void process_reorder(ol_txrx_pdev_handle pdev,
			    void *rx_mpdu_desc,
			    uint8_t tid,
			    struct ol_txrx_peer_t *peer,
			    qdf_nbuf_t head_msdu,
			    qdf_nbuf_t tail_msdu,
			    int num_mpdu_ranges,
			    int num_mpdus,
			    bool rx_ind_release)
{
	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	enum htt_rx_status mpdu_status;
	int reorder_idx;

	reorder_idx = htt_rx_mpdu_desc_reorder_idx(htt_pdev, rx_mpdu_desc,
						   true);
	OL_RX_REORDER_TRACE_ADD(pdev, tid,
				reorder_idx,
				htt_rx_mpdu_desc_seq_num(htt_pdev,
							 rx_mpdu_desc, false),
				1);
	ol_rx_mpdu_rssi_update(peer, rx_mpdu_desc);
	/*
	 * In most cases, out-of-bounds and duplicate sequence number detection
	 * is performed by the target, but in some cases it is done by the host.
	 * Specifically, the host does rx out-of-bounds sequence number
	 * detection for:
	 * 1. Peregrine or Rome target
	 *    for peer-TIDs that do not have aggregation enabled, if the
	 *    RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK flag
	 *    is set during the driver build.
	 * 2. Riva-family targets, which have rx reorder timeouts handled by
	 *    the host rather than the target.
	 *    (The target already does duplicate detection, but the host
	 *    may have given up waiting for a particular sequence number before
	 *    it arrives. In this case, the out-of-bounds sequence number
	 *    of the late frame allows the host to discard it, rather than
	 *    sending it out of order.)
	 */
	mpdu_status = OL_RX_SEQ_NUM_CHECK(pdev,
					  peer,
					  tid,
					  rx_mpdu_desc);
	if (mpdu_status != htt_rx_status_ok) {
		/*
		 * If the sequence number was out of bounds, the MPDU needs
		 * to be discarded.
		 */
		discard_msdus(htt_pdev, head_msdu, tail_msdu);
		/*
		 * For Peregrine and Rome,
		 * OL_RX_REORDER_SEQ_NUM_CHECK should only fail for the case
		 * of (duplicate) non-aggregates.
		 *
		 * For Riva, Pronto and Northstar,
		 * there should be only one MPDU delivered at a time.
		 * Thus, there are no further MPDUs that need to be
		 * processed here.
		 * Just to be sure this is true, check the assumption
		 * that this was the only MPDU referenced by the rx
		 * indication.
		 */
		TXRX_ASSERT2((num_mpdu_ranges == 1) && num_mpdus == 1);

		/*
		 * The MPDU was not stored in the rx reorder array, so
		 * there's nothing to release.
		 */
		rx_ind_release = false;
	} else {
		ol_rx_reorder_store(pdev, peer, tid,
				    reorder_idx, head_msdu, tail_msdu);
		if (peer->tids_rx_reorder[tid].win_sz_mask == 0) {
			peer->tids_last_seq[tid] = htt_rx_mpdu_desc_seq_num(
				htt_pdev,
				rx_mpdu_desc, false);
		}
	}
} /* process_reorder */

#ifdef WLAN_FEATURE_DSRC
static void
ol_rx_ocb_update_peer(ol_txrx_pdev_handle pdev, qdf_nbuf_t rx_ind_msg,
		      struct ol_txrx_peer_t *peer)
{
	int i;

	htt_rx_ind_legacy_rate(pdev->htt_pdev, rx_ind_msg,
			       &peer->last_pkt_legacy_rate,
			       &peer->last_pkt_legacy_rate_sel);
	peer->last_pkt_rssi_cmb = htt_rx_ind_rssi_dbm(
					pdev->htt_pdev, rx_ind_msg);
	for (i = 0; i < 4; i++)
		peer->last_pkt_rssi[i] =
			htt_rx_ind_rssi_dbm_chain(pdev->htt_pdev,
						  rx_ind_msg, i);

	htt_rx_ind_timestamp(pdev->htt_pdev, rx_ind_msg,
			     &peer->last_pkt_timestamp_microsec,
			     &peer->last_pkt_timestamp_submicrosec);
	peer->last_pkt_tsf = htt_rx_ind_tsf32(pdev->htt_pdev, rx_ind_msg);
	peer->last_pkt_tid = htt_rx_ind_ext_tid(pdev->htt_pdev, rx_ind_msg);
}
#else
static void
ol_rx_ocb_update_peer(ol_txrx_pdev_handle pdev, qdf_nbuf_t rx_ind_msg,
		      struct ol_txrx_peer_t *peer)
{
}
#endif

void
ol_rx_indication_handler(ol_txrx_pdev_handle pdev,
			 qdf_nbuf_t rx_ind_msg,
			 uint16_t peer_id, uint8_t tid, int num_mpdu_ranges)
{
	int mpdu_range;
	unsigned int seq_num_start = 0, seq_num_end = 0;
	bool rx_ind_release = false;
	struct ol_txrx_vdev_t *vdev = NULL;
	struct ol_txrx_peer_t *peer;
	htt_pdev_handle htt_pdev;
	uint16_t center_freq;
	uint16_t chan1;
	uint16_t chan2;
	uint8_t phymode;
	bool ret;
	uint32_t msdu_count = 0;

	htt_pdev = pdev->htt_pdev;
	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (!peer) {
		/*
		 * If we can't find a peer, send this packet to the OCB
		 * interface using the OCB self peer.
		 */
		if (!ol_txrx_get_ocb_peer(pdev, &peer))
			peer = NULL;
	}

	if (peer) {
		vdev = peer->vdev;
		ol_rx_ind_rssi_update(peer, rx_ind_msg);

		if (vdev->opmode == wlan_op_mode_ocb)
			ol_rx_ocb_update_peer(pdev, rx_ind_msg, peer);
	}

	TXRX_STATS_INCR(pdev, priv.rx.normal.ppdus);

	OL_RX_REORDER_TIMEOUT_MUTEX_LOCK(pdev);

	if (htt_rx_ind_flush(pdev->htt_pdev, rx_ind_msg) && peer) {
		htt_rx_ind_flush_seq_num_range(pdev->htt_pdev, rx_ind_msg,
					       &seq_num_start, &seq_num_end);
		if (tid == HTT_INVALID_TID) {
			/*
			 * The host/FW reorder state went out of sync
			 * for a while because the FW ran out of rx indication
			 * buffers. We have to discard all the buffers in
			 * the reorder queue.
			 */
			ol_rx_reorder_peer_cleanup(vdev, peer);
		} else {
			if (tid >= OL_TXRX_NUM_EXT_TIDS) {
				ol_txrx_err("invalid tid, %u", tid);
				WARN_ON(1);
				/* don't leak the reorder timeout mutex */
				OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
				return;
			}
			ol_rx_reorder_flush(vdev, peer, tid, seq_num_start,
					    seq_num_end, htt_rx_flush_release);
		}
	}

	if (htt_rx_ind_release(pdev->htt_pdev, rx_ind_msg)) {
		/*
		 * The release info from the indication is saved here, and the
		 * release is done at the end, because in the HL case the
		 * qdf_nbuf_t for the message and the payload are the same
		 * buffer, and the buffer is modified during processing.
		 */
		rx_ind_release = true;
		htt_rx_ind_release_seq_num_range(pdev->htt_pdev, rx_ind_msg,
						 &seq_num_start, &seq_num_end);
	}
#ifdef DEBUG_DMA_DONE
	pdev->htt_pdev->rx_ring.dbg_initial_msdu_payld =
		pdev->htt_pdev->rx_ring.sw_rd_idx.msdu_payld;
#endif

	for (mpdu_range = 0; mpdu_range < num_mpdu_ranges; mpdu_range++) {
		enum htt_rx_status status;
		int i, num_mpdus;
		qdf_nbuf_t head_msdu, tail_msdu, msdu;
		void *rx_mpdu_desc;

#ifdef DEBUG_DMA_DONE
		pdev->htt_pdev->rx_ring.dbg_mpdu_range = mpdu_range;
#endif

		htt_rx_ind_mpdu_range_info(pdev->htt_pdev, rx_ind_msg,
					   mpdu_range, &status, &num_mpdus);
		if ((status == htt_rx_status_ok) && peer) {
			TXRX_STATS_ADD(pdev, priv.rx.normal.mpdus, num_mpdus);
			/* valid frame - deposit it into rx reordering buffer */
			for (i = 0; i < num_mpdus; i++) {
				int msdu_chaining;
				/*
				 * Get a linked list of the MSDUs that comprise
				 * this MPDU.
				 * This also attaches each rx MSDU descriptor to
				 * the corresponding rx MSDU network buffer.
				 * (In some systems, the rx MSDU desc is already
				 * in the same buffer as the MSDU payload; in
				 * other systems they are separate, so a pointer
				 * needs to be set in the netbuf to locate the
				 * corresponding rx descriptor.)
				 *
				 * It is necessary to call htt_rx_amsdu_pop
				 * before htt_rx_mpdu_desc_list_next, because
				 * the (MPDU) rx descriptor has DMA unmapping
				 * done during the htt_rx_amsdu_pop call.
				 * The rx desc should not be accessed until this
				 * DMA unmapping has been done, since the DMA
				 * unmapping involves making sure the cache area
				 * for the mapped buffer is flushed, so the data
				 * written by the MAC DMA into memory will be
				 * fetched, rather than garbage from the cache.
				 */

#ifdef DEBUG_DMA_DONE
				pdev->htt_pdev->rx_ring.dbg_mpdu_count = i;
#endif

				msdu_chaining =
					htt_rx_amsdu_pop(htt_pdev,
							 rx_ind_msg,
							 &head_msdu,
							 &tail_msdu,
							 &msdu_count);
#ifdef HTT_RX_RESTORE
				if (htt_pdev->rx_ring.rx_reset) {
					ol_rx_trigger_restore(htt_pdev,
							      head_msdu,
							      tail_msdu);
					OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(
									pdev);
					return;
				}
#endif
				rx_mpdu_desc =
					htt_rx_mpdu_desc_list_next(htt_pdev,
								   rx_ind_msg);
				ret = htt_rx_msdu_center_freq(htt_pdev, peer,
					rx_mpdu_desc, &center_freq, &chan1,
					&chan2, &phymode);
				if (ret == true) {
					peer->last_pkt_center_freq =
						center_freq;
				} else {
					peer->last_pkt_center_freq = 0;
				}

				/* Pktlog */
				ol_rx_send_pktlog_event(pdev, peer,
							head_msdu, 1);

				if (msdu_chaining) {
					/*
					 * TBDXXX - to deliver an SDU with
					 * chaining, we need to stitch those
					 * scattered buffers into one single
					 * buffer.
					 * Just discard it now.
					 */
					chain_msdus(htt_pdev,
						    head_msdu,
						    tail_msdu);
				} else {
					process_reorder(pdev, rx_mpdu_desc,
							tid, peer,
							head_msdu, tail_msdu,
							num_mpdu_ranges,
							num_mpdus,
							rx_ind_release);
				}

			}
		} else {
			/* invalid frames - discard them */
			OL_RX_REORDER_TRACE_ADD(pdev, tid,
						TXRX_SEQ_NUM_ERR(status),
						TXRX_SEQ_NUM_ERR(status),
						num_mpdus);
			TXRX_STATS_ADD(pdev, priv.rx.err.mpdu_bad, num_mpdus);
			for (i = 0; i < num_mpdus; i++) {
				/* pull the MPDU's MSDUs off the buffer queue */
				htt_rx_amsdu_pop(htt_pdev, rx_ind_msg, &msdu,
						 &tail_msdu, &msdu_count);
#ifdef HTT_RX_RESTORE
				if (htt_pdev->rx_ring.rx_reset) {
					ol_rx_trigger_restore(htt_pdev, msdu,
							      tail_msdu);
					OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(
									pdev);
					return;
				}
#endif
				/* pull the MPDU desc off the desc queue */
				rx_mpdu_desc =
					htt_rx_mpdu_desc_list_next(htt_pdev,
								   rx_ind_msg);
				OL_RX_ERR_STATISTICS_2(pdev, vdev, peer,
						       rx_mpdu_desc, msdu,
						       status);

				if (status == htt_rx_status_tkip_mic_err &&
				    vdev && peer) {
					union htt_rx_pn_t pn;
					uint8_t key_id;

					htt_rx_mpdu_desc_pn(
						pdev->htt_pdev,
						htt_rx_msdu_desc_retrieve(
							pdev->htt_pdev,
							msdu), &pn, 48);
					if (htt_rx_msdu_desc_key_id(
						    pdev->htt_pdev,
						    htt_rx_msdu_desc_retrieve(
							    pdev->htt_pdev,
							    msdu),
						    &key_id) == true) {
						ol_rx_send_mic_err_ind(
							vdev->pdev,
							vdev->vdev_id,
							peer->mac_addr.raw,
							tid, 0,
							OL_RX_ERR_TKIP_MIC,
							msdu, &pn.pn48,
							key_id);
					}
				}

				if (status != htt_rx_status_ctrl_mgmt_null) {
					/* Pktlog */
					ol_rx_send_pktlog_event(pdev,
								peer, msdu, 1);
				}

				if (status == htt_rx_status_err_inv_peer) {
					/* once per mpdu */
					ol_rx_process_inv_peer(pdev,
							       rx_mpdu_desc,
							       msdu);
				}

				while (1) {
					/* Free the nbuf */
					qdf_nbuf_t next;

					next = qdf_nbuf_next(msdu);
					htt_rx_desc_frame_free(htt_pdev, msdu);
					if (msdu == tail_msdu)
						break;
					msdu = next;
				}
			}
		}
	}
	/*
	 * Now that a whole batch of MSDUs has been pulled out of HTT
	 * and put into the rx reorder array, it is an appropriate time
	 * to request HTT to provide new rx MSDU buffers for the target
	 * to fill.
	 * This could be done after the end of this function, but it's
	 * better to do it now, rather than waiting until after the driver
	 * and OS finish processing the batch of rx MSDUs.
	 */
	htt_rx_msdu_buff_replenish(htt_pdev);

	if ((true == rx_ind_release) && peer && vdev) {
		ol_rx_reorder_release(vdev, peer, tid, seq_num_start,
				      seq_num_end);
	}
	OL_RX_REORDER_TIMEOUT_UPDATE(peer, tid);
	OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);

	if (pdev->rx.flags.defrag_timeout_check)
		ol_rx_defrag_waitlist_flush(pdev);
}
#endif

void
ol_rx_sec_ind_handler(ol_txrx_pdev_handle pdev,
		      uint16_t peer_id,
		      enum htt_sec_type sec_type,
		      int is_unicast, uint32_t *michael_key, uint32_t *rx_pn)
{
	struct ol_txrx_peer_t *peer;
	int sec_index, i;

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (!peer) {
		ol_txrx_err(
			"Couldn't find peer from ID %d - skipping security inits\n",
			peer_id);
		return;
	}
	ol_txrx_dbg(
		"sec spec for peer %pK ("QDF_MAC_ADDR_FMT"): %s key of type %d\n",
		peer,
		QDF_MAC_ADDR_REF(peer->mac_addr.raw),
		is_unicast ? "ucast" : "mcast", sec_type);
	sec_index = is_unicast ? txrx_sec_ucast : txrx_sec_mcast;
	peer->security[sec_index].sec_type = sec_type;
	/*
	 * michael key only valid for TKIP
	 * but for simplicity, copy it anyway
	 */
	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
		     michael_key,
		     sizeof(peer->security[sec_index].michael_key));

	if (sec_type != htt_sec_type_wapi) {
		qdf_mem_zero(peer->tids_last_pn_valid,
			     OL_TXRX_NUM_EXT_TIDS);
	} else if (sec_index == txrx_sec_mcast || peer->tids_last_pn_valid[0]) {
		for (i = 0; i < OL_TXRX_NUM_EXT_TIDS; i++) {
			/*
			 * Setting PN valid bit for WAPI sec_type,
			 * since WAPI PN has to be started with predefined value
			 */
			peer->tids_last_pn_valid[i] = 1;
			qdf_mem_copy((uint8_t *) &peer->tids_last_pn[i],
				     (uint8_t *) rx_pn,
				     sizeof(union htt_rx_pn_t));
			peer->tids_last_pn[i].pn128[1] =
				qdf_cpu_to_le64(
					peer->tids_last_pn[i].pn128[1]);
			peer->tids_last_pn[i].pn128[0] =
				qdf_cpu_to_le64(
					peer->tids_last_pn[i].pn128[0]);
			if (sec_index == txrx_sec_ucast)
				peer->tids_rekey_flag[i] = 1;
		}
	}
}

void ol_rx_notify(struct cdp_cfg *cfg_pdev,
		  uint8_t vdev_id,
		  uint8_t *peer_mac_addr,
		  int tid,
		  uint32_t tsf32,
		  enum ol_rx_notify_type notify_type, qdf_nbuf_t rx_frame)
{
	/*
	 * NOTE: This is used in qca_main for AP mode to handle IGMP
	 * packets specially. UMAC has a corresponding handler for this;
	 * it is not clear whether CLD needs one as well.
	 */
}

#ifdef WLAN_PARTIAL_REORDER_OFFLOAD
/**
 * @brief Look into a rx MSDU to see what kind of special handling it requires
 * @details
 *      This function is called when the host rx SW sees that the target
 *      rx FW has marked a rx MSDU as needing inspection.
 *      Based on the results of the inspection, the host rx SW will infer
 *      what special handling to perform on the rx frame.
 *      Currently, the only type of frames that require special handling
 *      are IGMP frames. The rx data-path SW checks if the frame is IGMP
 *      (it should be, since the target would not have set the inspect flag
 *      otherwise), and then calls the ol_rx_notify function so the
 *      control-path SW can perform multicast group membership learning
 *      by sniffing the IGMP frame.
 */
#define SIZEOF_80211_HDR (sizeof(struct ieee80211_frame))
static void
ol_rx_inspect(struct ol_txrx_vdev_t *vdev,
	      struct ol_txrx_peer_t *peer,
	      unsigned int tid, qdf_nbuf_t msdu, void *rx_desc)
{
	ol_txrx_pdev_handle pdev = vdev->pdev;
	uint8_t *data, *l3_hdr;
	uint16_t ethertype;
	int offset;

	data = qdf_nbuf_data(msdu);
	if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
		offset = SIZEOF_80211_HDR + LLC_SNAP_HDR_OFFSET_ETHERTYPE;
		l3_hdr = data + SIZEOF_80211_HDR + LLC_SNAP_HDR_LEN;
	} else {
		offset = QDF_MAC_ADDR_SIZE * 2;
		l3_hdr = data + ETHERNET_HDR_LEN;
	}
	ethertype = (data[offset] << 8) | data[offset + 1];
	if (ethertype == ETHERTYPE_IPV4) {
		offset = IPV4_HDR_OFFSET_PROTOCOL;
		if (l3_hdr[offset] == IP_PROTOCOL_IGMP) {
			ol_rx_notify(pdev->ctrl_pdev,
				     vdev->vdev_id,
				     peer->mac_addr.raw,
				     tid,
				     htt_rx_mpdu_desc_tsf32(pdev->htt_pdev,
							    rx_desc),
				     OL_RX_NOTIFY_IPV4_IGMP, msdu);
		}
	}
}
#endif

void
ol_rx_offload_deliver_ind_handler(ol_txrx_pdev_handle pdev,
				  qdf_nbuf_t msg, uint16_t msdu_cnt)
{
	int vdev_id, peer_id, tid;
	qdf_nbuf_t head_buf, tail_buf, buf;
	struct ol_txrx_peer_t *peer;
	uint8_t fw_desc;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	if (msdu_cnt > htt_rx_offload_msdu_cnt(htt_pdev)) {
		ol_txrx_err("invalid msdu_cnt=%u", msdu_cnt);

		if (pdev->cfg.is_high_latency)
			htt_rx_desc_frame_free(htt_pdev, msg);

		return;
	}

	while (msdu_cnt) {
		if (!htt_rx_offload_msdu_pop(htt_pdev, msg, &vdev_id, &peer_id,
					     &tid, &fw_desc, &head_buf,
					     &tail_buf)) {
			peer = ol_txrx_peer_find_by_id(pdev, peer_id);
			if (peer) {
				ol_rx_data_process(peer, head_buf);
			} else {
				buf = head_buf;
				while (1) {
					qdf_nbuf_t next;

					next = qdf_nbuf_next(buf);
					htt_rx_desc_frame_free(htt_pdev, buf);
					if (buf == tail_buf)
						break;
					buf = next;
				}
			}
		}
		msdu_cnt--;
	}
	htt_rx_msdu_buff_replenish(htt_pdev);
}

void
ol_rx_send_mic_err_ind(struct ol_txrx_pdev_t *pdev, uint8_t vdev_id,
		       uint8_t *peer_mac_addr, int tid, uint32_t tsf32,
		       enum ol_rx_err_type err_type, qdf_nbuf_t rx_frame,
		       uint64_t *pn, uint8_t key_id)
{
	struct cdp_rx_mic_err_info mic_failure_info;
	qdf_ether_header_t *eth_hdr;
	struct ol_if_ops *tops = NULL;
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	ol_txrx_soc_handle ol_txrx_soc = &soc->cdp_soc;

	if (err_type != OL_RX_ERR_TKIP_MIC)
		return;

	if (qdf_nbuf_len(rx_frame) < sizeof(*eth_hdr))
		return;

	eth_hdr = (qdf_ether_header_t *)qdf_nbuf_data(rx_frame);

	qdf_copy_macaddr((struct qdf_mac_addr *)&mic_failure_info.ta_mac_addr,
			 (struct qdf_mac_addr *)peer_mac_addr);
	qdf_copy_macaddr((struct qdf_mac_addr *)&mic_failure_info.da_mac_addr,
			 (struct qdf_mac_addr *)eth_hdr->ether_dhost);
	mic_failure_info.key_id = key_id;
	mic_failure_info.multicast =
		IEEE80211_IS_MULTICAST(eth_hdr->ether_dhost);
	qdf_mem_copy(mic_failure_info.tsc, pn, SIR_CIPHER_SEQ_CTR_SIZE);
	mic_failure_info.frame_type = cdp_rx_frame_type_802_3;
	mic_failure_info.data = NULL;
	mic_failure_info.vdev_id = vdev_id;

	tops = ol_txrx_soc->ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(soc->psoc, pdev->id, &mic_failure_info);
}

void
ol_rx_mic_error_handler(
	ol_txrx_pdev_handle pdev,
	u_int8_t tid,
	u_int16_t peer_id,
	void *msdu_desc,
	qdf_nbuf_t msdu)
{
	union htt_rx_pn_t pn = {0};
	u_int8_t key_id = 0;

	struct ol_txrx_peer_t *peer = NULL;
	struct ol_txrx_vdev_t *vdev = NULL;

	if (pdev) {
		TXRX_STATS_MSDU_INCR(pdev, rx.dropped_mic_err, msdu);
		peer = ol_txrx_peer_find_by_id(pdev, peer_id);
		if (peer) {
			vdev = peer->vdev;
			if (vdev) {
				htt_rx_mpdu_desc_pn(vdev->pdev->htt_pdev,
						    msdu_desc, &pn, 48);

				if (htt_rx_msdu_desc_key_id(
					    vdev->pdev->htt_pdev, msdu_desc,
					    &key_id) == true) {
					ol_rx_send_mic_err_ind(vdev->pdev,
						vdev->vdev_id,
						peer->mac_addr.raw, tid, 0,
						OL_RX_ERR_TKIP_MIC, msdu,
						&pn.pn48, key_id);
				}
			}
		}
		/* Pktlog */
		ol_rx_send_pktlog_event(pdev, peer, msdu, 1);
	}
}

#ifdef WLAN_PARTIAL_REORDER_OFFLOAD
/**
 * @brief Check the first MSDU to decide whether the A-MSDU should be accepted.
 */
static bool
ol_rx_filter(struct ol_txrx_vdev_t *vdev,
	     struct ol_txrx_peer_t *peer, qdf_nbuf_t msdu, void *rx_desc)
{
#define FILTER_STATUS_REJECT 1
#define FILTER_STATUS_ACCEPT 0
	uint8_t *wh;
	uint32_t offset = 0;
	uint16_t ether_type = 0;
	bool is_encrypted = false, is_mcast = false;
	uint8_t i;
	enum privacy_filter_packet_type packet_type =
		PRIVACY_FILTER_PACKET_UNICAST;
	ol_txrx_pdev_handle pdev = vdev->pdev;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	int sec_idx;

	/*
	 * Safemode must avoid the PrivacyExemptionList and
	 * ExcludeUnencrypted checking
	 */
	if (vdev->safemode)
		return FILTER_STATUS_ACCEPT;

	is_mcast = htt_rx_msdu_is_wlan_mcast(htt_pdev, rx_desc);
	if (vdev->num_filters > 0) {
		if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
			offset = SIZEOF_80211_HDR +
				LLC_SNAP_HDR_OFFSET_ETHERTYPE;
		} else {
			offset = QDF_MAC_ADDR_SIZE * 2;
		}
		/* get header info from msdu */
		wh = qdf_nbuf_data(msdu);

		/* get ether type */
		ether_type = (wh[offset] << 8) | wh[offset + 1];
		/* get packet type */
		if (true == is_mcast)
			packet_type = PRIVACY_FILTER_PACKET_MULTICAST;
		else
			packet_type = PRIVACY_FILTER_PACKET_UNICAST;
	}
	/* get encrypt info */
	is_encrypted = htt_rx_mpdu_is_encrypted(htt_pdev, rx_desc);
#ifdef ATH_SUPPORT_WAPI
	if ((true == is_encrypted) && (ETHERTYPE_WAI == ether_type)) {
		/*
		 * We expect the WAI frames to be always unencrypted when
		 * the UMAC gets it
		 */
		return FILTER_STATUS_REJECT;
	}
#endif /* ATH_SUPPORT_WAPI */

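	/*
	 * Walk the vdev's privacy-exemption list in order: the first entry
	 * that matches both the ether type and the packet type decides
	 * whether the frame is accepted or rejected.
	 */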
	for (i = 0; i < vdev->num_filters; i++) {
		enum privacy_filter filter_type;
		enum privacy_filter_packet_type filter_packet_type;

		/* skip if the ether type does not match */
		if (vdev->privacy_filters[i].ether_type != ether_type)
			continue;

		/* skip if the packet type does not match */
		filter_packet_type = vdev->privacy_filters[i].packet_type;
		if (filter_packet_type != packet_type &&
		    filter_packet_type != PRIVACY_FILTER_PACKET_BOTH) {
			continue;
		}

		filter_type = vdev->privacy_filters[i].filter_type;
		if (filter_type == PRIVACY_FILTER_ALWAYS) {
			/*
			 * In this case, we accept the frame if and only if
			 * it was originally NOT encrypted.
			 */
			if (true == is_encrypted)
				return FILTER_STATUS_REJECT;
			else
				return FILTER_STATUS_ACCEPT;

		} else if (filter_type == PRIVACY_FILTER_KEY_UNAVAILABLE) {
			/*
			 * In this case, we reject the frame if it was
			 * originally NOT encrypted but we have the key mapping
			 * key for this frame.
			 */
			if (!is_encrypted &&
			    !is_mcast &&
			    (peer->security[txrx_sec_ucast].sec_type !=
			     htt_sec_type_none) &&
			    (peer->keyinstalled || !ETHERTYPE_IS_EAPOL_WAPI(
				    ether_type))) {
				return FILTER_STATUS_REJECT;
			} else {
				return FILTER_STATUS_ACCEPT;
			}
		} else {
			/*
			 * The privacy exemption does not apply to this frame.
			 */
			break;
		}
	}

	/*
	 * If the privacy exemption list does not apply to the frame,
	 * check ExcludeUnencrypted.
	 * If ExcludeUnencrypted is not set, or if this was originally
	 * an encrypted frame, it will be accepted.
	 */
	if (!vdev->drop_unenc || (true == is_encrypted))
		return FILTER_STATUS_ACCEPT;

	/*
	 * If this is an open connection, it will be accepted.
	 */
	sec_idx = (true == is_mcast) ? txrx_sec_mcast : txrx_sec_ucast;
	if (peer->security[sec_idx].sec_type == htt_sec_type_none)
		return FILTER_STATUS_ACCEPT;

	if ((false == is_encrypted) && vdev->drop_unenc) {
		OL_RX_ERR_STATISTICS(pdev, vdev, OL_RX_ERR_PRIVACY,
				     pdev->sec_types[htt_sec_type_none],
				     is_mcast);
	}
	return FILTER_STATUS_REJECT;
}
#endif

#ifdef WLAN_FEATURE_TSF_PLUS
#ifdef CONFIG_HL_SUPPORT
void ol_rx_timestamp(struct cdp_cfg *cfg_pdev,
		     void *rx_desc, qdf_nbuf_t msdu)
{
	struct htt_rx_ppdu_desc_t *rx_ppdu_desc;

	if (!ol_cfg_is_ptp_rx_opt_enabled(cfg_pdev))
		return;

	if (!rx_desc || !msdu)
		return;

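	/*
	 * The HL rx indication carries a PPDU descriptor between the message
	 * prefix and the rx descriptor; step back from rx_desc by
	 * (HTT_RX_IND_HL_BYTES - HTT_RX_IND_HDR_PREFIX_BYTES) to recover it.
	 */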
	rx_ppdu_desc = (struct htt_rx_ppdu_desc_t *)((uint8_t *)(rx_desc) -
			HTT_RX_IND_HL_BYTES + HTT_RX_IND_HDR_PREFIX_BYTES);
	msdu->tstamp = ns_to_ktime((u_int64_t)rx_ppdu_desc->tsf32 *
				   NSEC_PER_USEC);
}

static inline void ol_rx_timestamp_update(ol_txrx_pdev_handle pdev,
					  qdf_nbuf_t head_msdu,
					  qdf_nbuf_t tail_msdu)
{
	qdf_nbuf_t loop_msdu;
	struct htt_host_rx_desc_base *rx_desc;

	loop_msdu = head_msdu;
	while (loop_msdu) {
		rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, loop_msdu);
		ol_rx_timestamp(pdev->ctrl_pdev, rx_desc, loop_msdu);
		loop_msdu = qdf_nbuf_next(loop_msdu);
	}
}
#else
void ol_rx_timestamp(struct cdp_cfg *cfg_pdev,
		     void *rx_desc, qdf_nbuf_t msdu)
{
	struct htt_host_rx_desc_base *rx_mpdu_desc = rx_desc;
	uint32_t tsf64_low32, tsf64_high32;
	uint64_t tsf64, tsf64_ns;

	if (!ol_cfg_is_ptp_rx_opt_enabled(cfg_pdev))
		return;

	if (!rx_mpdu_desc || !msdu)
		return;

	tsf64_low32 = rx_mpdu_desc->ppdu_end.wb_timestamp_lower_32;
	tsf64_high32 = rx_mpdu_desc->ppdu_end.wb_timestamp_upper_32;

	tsf64 = (uint64_t)tsf64_high32 << 32 | tsf64_low32;
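	/*
	 * Guard against 64-bit overflow when converting the TSF from
	 * microseconds to nanoseconds: if the multiplication wraps, fall
	 * back to a timestamp of 0 rather than reporting a bogus value.
	 */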
	if (tsf64 * NSEC_PER_USEC < tsf64)
		tsf64_ns = 0;
	else
		tsf64_ns = tsf64 * NSEC_PER_USEC;

	msdu->tstamp = ns_to_ktime(tsf64_ns);
}

/**
 * ol_rx_timestamp_update() - update msdu tsf64 timestamp
 * @pdev: pointer to txrx handle
 * @head_msdu: pointer to head msdu
 * @tail_msdu: pointer to tail msdu
 *
 * Return: none
 */
static inline void ol_rx_timestamp_update(ol_txrx_pdev_handle pdev,
					  qdf_nbuf_t head_msdu,
					  qdf_nbuf_t tail_msdu)
{
	qdf_nbuf_t loop_msdu;
	uint64_t hostime, deltahostime, tsf64_time;
	struct htt_host_rx_desc_base *rx_desc;

	if (!ol_cfg_is_ptp_rx_opt_enabled(pdev->ctrl_pdev))
		return;

	if (!tail_msdu)
		return;

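	/*
	 * If the target did not stamp this PPDU (both timestamp words are
	 * zero), extrapolate the TSF from the last stamped PPDU using the
	 * host time elapsed since it was recorded; otherwise record the new
	 * host-time/TSF pair for future extrapolation.
	 */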
	hostime = ktime_get_ns();
	rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, tail_msdu);
	if (rx_desc->ppdu_end.wb_timestamp_lower_32 == 0 &&
	    rx_desc->ppdu_end.wb_timestamp_upper_32 == 0) {
		deltahostime = hostime - pdev->last_host_time;
		do_div(deltahostime, NSEC_PER_USEC);
		tsf64_time = pdev->last_tsf64_time + deltahostime;

		rx_desc->ppdu_end.wb_timestamp_lower_32 =
			tsf64_time & 0xFFFFFFFF;
		rx_desc->ppdu_end.wb_timestamp_upper_32 = tsf64_time >> 32;
	} else {
		pdev->last_host_time = hostime;
		pdev->last_tsf64_time =
			(uint64_t)rx_desc->ppdu_end.wb_timestamp_upper_32 << 32 |
			rx_desc->ppdu_end.wb_timestamp_lower_32;
	}

	loop_msdu = head_msdu;
	while (loop_msdu) {
		ol_rx_timestamp(pdev->ctrl_pdev, rx_desc, loop_msdu);
		loop_msdu = qdf_nbuf_next(loop_msdu);
	}
}
#endif
#else
void ol_rx_timestamp(struct cdp_cfg *cfg_pdev,
		     void *rx_desc, qdf_nbuf_t msdu)
{
}

static inline void ol_rx_timestamp_update(ol_txrx_pdev_handle pdev,
					  qdf_nbuf_t head_msdu,
					  qdf_nbuf_t tail_msdu)
{
}
#endif

#ifdef WLAN_FEATURE_DSRC
static inline
void ol_rx_ocb_prepare_rx_stats_header(struct ol_txrx_vdev_t *vdev,
				       struct ol_txrx_peer_t *peer,
				       qdf_nbuf_t msdu)
{
	int i;
	struct ol_txrx_ocb_chan_info *chan_info = 0;
	int packet_freq = peer->last_pkt_center_freq;

	for (i = 0; i < vdev->ocb_channel_count; i++) {
		if (vdev->ocb_channel_info[i].chan_freq == packet_freq) {
			chan_info = &vdev->ocb_channel_info[i];
			break;
		}
	}

	if (!chan_info || !chan_info->disable_rx_stats_hdr) {
		qdf_ether_header_t eth_header = { {0} };
		struct ocb_rx_stats_hdr_t rx_header = {0};

		/*
		 * Construct the RX stats header and
		 * push that to the front of the packet.
		 */
		rx_header.version = 1;
		rx_header.length = sizeof(rx_header);
		rx_header.channel_freq = peer->last_pkt_center_freq;
		rx_header.rssi_cmb = peer->last_pkt_rssi_cmb;
		qdf_mem_copy(rx_header.rssi, peer->last_pkt_rssi,
			     sizeof(rx_header.rssi));

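		/*
		 * Map the hardware legacy rate code (0x8-0xF) to the OCB
		 * datarate index; a rate-selection flag or any code outside
		 * that range is reported as 0xFF (unknown).
		 */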
		if (peer->last_pkt_legacy_rate_sel)
			rx_header.datarate = 0xFF;
		else if (peer->last_pkt_legacy_rate == 0x8)
			rx_header.datarate = 6;
		else if (peer->last_pkt_legacy_rate == 0x9)
			rx_header.datarate = 4;
		else if (peer->last_pkt_legacy_rate == 0xA)
			rx_header.datarate = 2;
		else if (peer->last_pkt_legacy_rate == 0xB)
			rx_header.datarate = 0;
		else if (peer->last_pkt_legacy_rate == 0xC)
			rx_header.datarate = 7;
		else if (peer->last_pkt_legacy_rate == 0xD)
			rx_header.datarate = 5;
		else if (peer->last_pkt_legacy_rate == 0xE)
			rx_header.datarate = 3;
		else if (peer->last_pkt_legacy_rate == 0xF)
			rx_header.datarate = 1;
		else
			rx_header.datarate = 0xFF;

		rx_header.timestamp_microsec =
			peer->last_pkt_timestamp_microsec;
		rx_header.timestamp_submicrosec =
			peer->last_pkt_timestamp_submicrosec;
		rx_header.tsf32 = peer->last_pkt_tsf;
		rx_header.ext_tid = peer->last_pkt_tid;

		qdf_nbuf_push_head(msdu, sizeof(rx_header));
		qdf_mem_copy(qdf_nbuf_data(msdu),
			     &rx_header, sizeof(rx_header));

		/*
		 * Construct the ethernet header with
		 * type 0x8152 and push that to the
		 * front of the packet to indicate the
		 * RX stats header.
		 */
		eth_header.ether_type = QDF_SWAP_U16(ETHERTYPE_OCB_RX);
		qdf_nbuf_push_head(msdu, sizeof(eth_header));
		qdf_mem_copy(qdf_nbuf_data(msdu), &eth_header,
			     sizeof(eth_header));
	}
}
#else
static inline
void ol_rx_ocb_prepare_rx_stats_header(struct ol_txrx_vdev_t *vdev,
				       struct ol_txrx_peer_t *peer,
				       qdf_nbuf_t msdu)
{
}
#endif

#ifdef WLAN_PARTIAL_REORDER_OFFLOAD
void
ol_rx_deliver(struct ol_txrx_vdev_t *vdev,
	      struct ol_txrx_peer_t *peer, unsigned int tid,
	      qdf_nbuf_t msdu_list)
{
	ol_txrx_pdev_handle pdev = vdev->pdev;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	qdf_nbuf_t msdu;
	bool filter = false;
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	struct ol_rx_decap_info_t info;

	qdf_mem_zero(&info, sizeof(info));
#endif

	msdu = msdu_list;
	/*
	 * Check each MSDU to see whether it requires special handling,
	 * and free each MSDU's rx descriptor
	 */
	while (msdu) {
		void *rx_desc;
		int discard, inspect, dummy_fwd;
		qdf_nbuf_t next = qdf_nbuf_next(msdu);

		rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, msdu);
		/* for HL, point to payload right now */
		if (pdev->cfg.is_high_latency) {
			qdf_nbuf_pull_head(msdu,
				htt_rx_msdu_rx_desc_size_hl(htt_pdev,
							    rx_desc));
		}

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
		info.is_msdu_cmpl_mpdu =
			htt_rx_msdu_desc_completes_mpdu(htt_pdev, rx_desc);
		info.is_first_subfrm =
			htt_rx_msdu_first_msdu_flag(htt_pdev, rx_desc);
		if (OL_RX_DECAP(vdev, peer, msdu, &info) != A_OK) {
			discard = 1;
			ol_txrx_dbg(
				"decap error %pK from peer %pK ("QDF_MAC_ADDR_FMT") len %d\n",
				msdu, peer,
				QDF_MAC_ADDR_REF(peer->mac_addr.raw),
				qdf_nbuf_len(msdu));
			goto DONE;
		}
#endif
		htt_rx_msdu_actions(pdev->htt_pdev, rx_desc, &discard,
				    &dummy_fwd, &inspect);
		if (inspect)
			ol_rx_inspect(vdev, peer, tid, msdu, rx_desc);

		/*
		 * Check the first msdu in the mpdu, if it will be filtered out,
		 * then discard the entire mpdu.
		 */
		if (htt_rx_msdu_first_msdu_flag(htt_pdev, rx_desc))
			filter = ol_rx_filter(vdev, peer, msdu, rx_desc);

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
DONE:
#endif
		htt_rx_msdu_desc_free(htt_pdev, msdu);
		if (discard || (true == filter)) {
			ol_txrx_frms_dump("rx discarding:",
					  pdev, deliver_list_head,
					  ol_txrx_frm_dump_tcp_seq |
					  ol_txrx_frm_dump_contents,
					  0 /* don't print contents */);
			qdf_nbuf_free(msdu);
			/*
			 * If the discarded packet is the last packet of the
			 * delivery list, add a NULL terminator to the
			 * delivery list.
			 */
			if (!next && deliver_list_head) {
				/* add NULL terminator */
				qdf_nbuf_set_next(deliver_list_tail, NULL);
			}
		} else {
			/*
			 * If this is for OCB,
			 * then prepend the RX stats header.
			 */
			if (vdev->opmode == wlan_op_mode_ocb)
				ol_rx_ocb_prepare_rx_stats_header(vdev, peer,
								  msdu);

			OL_RX_PEER_STATS_UPDATE(peer, msdu);
			OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, rx_desc,
					       OL_RX_ERR_NONE);
			TXRX_STATS_MSDU_INCR(vdev->pdev, rx.delivered, msdu);

			ol_rx_timestamp(pdev->ctrl_pdev, rx_desc, msdu);
			OL_TXRX_LIST_APPEND(deliver_list_head,
					    deliver_list_tail, msdu);
			QDF_NBUF_CB_DP_TRACE_PRINT(msdu) = false;
			qdf_dp_trace_set_track(msdu, QDF_RX);
		}
		msdu = next;
	}
	/* sanity check - are there any frames left to give to the OS shim? */
	if (!deliver_list_head)
		return;

	ol_txrx_frms_dump("rx delivering:",
			  pdev, deliver_list_head,
			  ol_txrx_frm_dump_tcp_seq | ol_txrx_frm_dump_contents,
			  0 /* don't print contents */);

	ol_rx_data_process(peer, deliver_list_head);
}
#endif

void
ol_rx_discard(struct ol_txrx_vdev_t *vdev,
	      struct ol_txrx_peer_t *peer, unsigned int tid,
	      qdf_nbuf_t msdu_list)
{
	while (msdu_list) {
		qdf_nbuf_t msdu = msdu_list;

		msdu_list = qdf_nbuf_next(msdu_list);
		ol_txrx_dbg("discard rx %pK", msdu);
		qdf_nbuf_free(msdu);
	}
}

void ol_rx_peer_init(struct ol_txrx_pdev_t *pdev, struct ol_txrx_peer_t *peer)
{
	uint8_t tid;

	for (tid = 0; tid < OL_TXRX_NUM_EXT_TIDS; tid++) {
		ol_rx_reorder_init(&peer->tids_rx_reorder[tid], tid);

		/* invalid sequence number */
		peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;
		/* invalid reorder index number */
		peer->tids_next_rel_idx[tid] = INVALID_REORDER_INDEX;

	}
	/*
	 * Set security defaults: no PN check, no security.
	 * The target may send a HTT SEC_IND message to overwrite
	 * these defaults.
	 */
	peer->security[txrx_sec_ucast].sec_type =
		peer->security[txrx_sec_mcast].sec_type = htt_sec_type_none;
	peer->keyinstalled = 0;

	peer->last_assoc_rcvd = 0;
	peer->last_disassoc_rcvd = 0;
	peer->last_deauth_rcvd = 0;

	qdf_atomic_init(&peer->fw_pn_check);
}

void
ol_rx_peer_cleanup(struct ol_txrx_vdev_t *vdev, struct ol_txrx_peer_t *peer)
{
	peer->keyinstalled = 0;
	peer->last_assoc_rcvd = 0;
	peer->last_disassoc_rcvd = 0;
	peer->last_deauth_rcvd = 0;
	ol_rx_reorder_peer_cleanup(vdev, peer);
}

/*
 * Free frames including both rx descriptors and buffers
 */
void ol_rx_frames_free(htt_pdev_handle htt_pdev, qdf_nbuf_t frames)
{
	qdf_nbuf_t next, frag = frames;

	while (frag) {
		next = qdf_nbuf_next(frag);
		htt_rx_desc_frame_free(htt_pdev, frag);
		frag = next;
	}
}

#ifdef WLAN_FULL_REORDER_OFFLOAD
void
ol_rx_in_order_indication_handler(ol_txrx_pdev_handle pdev,
				  qdf_nbuf_t rx_ind_msg,
				  uint16_t peer_id,
				  uint8_t tid, uint8_t is_offload)
{
	struct ol_txrx_vdev_t *vdev = NULL;
	struct ol_txrx_peer_t *peer = NULL;
	struct ol_txrx_peer_t *peer_head = NULL;
	htt_pdev_handle htt_pdev = NULL;
	int status;
	qdf_nbuf_t head_msdu = NULL, tail_msdu = NULL;
	uint8_t *rx_ind_data;
	uint32_t *msg_word;
	uint32_t msdu_count;
	uint8_t pktlog_bit;
	uint32_t filled = 0;
	uint8_t bssid[QDF_MAC_ADDR_SIZE];
	bool offloaded_pkt;
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);

	if (qdf_unlikely(!soc))
		return;

	if (tid >= OL_TXRX_NUM_EXT_TIDS) {
		ol_txrx_err("invalid tid, %u", tid);
		WARN_ON(1);
		return;
	}

	if (pdev) {
		if (qdf_unlikely(QDF_GLOBAL_MONITOR_MODE == cds_get_conparam()))
			peer = pdev->self_peer;
		else
			peer = ol_txrx_peer_find_by_id(pdev, peer_id);
		htt_pdev = pdev->htt_pdev;
	} else {
		ol_txrx_err("Invalid pdev passed!");
		qdf_assert_always(pdev);
		return;
	}

#if defined(HELIUMPLUS_DEBUG)
	qdf_print("rx_ind_msg 0x%pK peer_id %d tid %d is_offload %d",
		  rx_ind_msg, peer_id, tid, is_offload);
#endif

	pktlog_bit = (htt_rx_amsdu_rx_in_order_get_pktlog(rx_ind_msg) == 0x01);
	rx_ind_data = qdf_nbuf_data(rx_ind_msg);
	msg_word = (uint32_t *)rx_ind_data;
	/* Get the total number of MSDUs */
	msdu_count = HTT_RX_IN_ORD_PADDR_IND_MSDU_CNT_GET(*(msg_word + 1));

	ol_rx_ind_record_event(msdu_count, OL_RX_INDICATION_POP_START);

	/*
	 * Get a linked list of the MSDUs in the rx in order indication.
	 * This also attaches each rx MSDU descriptor to the
	 * corresponding rx MSDU network buffer.
	 */
	status = htt_rx_amsdu_pop(htt_pdev, rx_ind_msg, &head_msdu,
				  &tail_msdu, &msdu_count);
	ol_rx_ind_record_event(status, OL_RX_INDICATION_POP_END);

	if (qdf_unlikely(0 == status)) {
		ol_txrx_warn("pop failed");
		return;
	}

	/*
	 * Replenish the rx buffer ring first to provide buffers to the target
	 * rather than waiting for the indeterminate time taken by the OS
	 * to consume the rx frames
	 */
	filled = htt_rx_msdu_buff_in_order_replenish(htt_pdev, msdu_count);
	ol_rx_ind_record_event(filled, OL_RX_INDICATION_BUF_REPLENISH);

	if (!head_msdu) {
		ol_txrx_dbg("No packet to send to HDD");
		return;
	}

	/* Send the chain of MSDUs to the OS */
	/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
	qdf_nbuf_set_next(tail_msdu, NULL);

	/* Packet Capture Mode */

	if ((ucfg_pkt_capture_get_pktcap_mode((void *)soc->psoc) &
	     PKT_CAPTURE_MODE_DATA_ONLY)) {
		offloaded_pkt = ucfg_pkt_capture_rx_offloaded_pkt(rx_ind_msg);
		if (peer) {
			vdev = peer->vdev;
			if (peer->vdev) {
				qdf_spin_lock_bh(&pdev->peer_ref_mutex);
				peer_head = TAILQ_FIRST(&vdev->peer_list);
				qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
				if (peer_head) {
					qdf_spin_lock_bh(
						&peer_head->peer_info_lock);
					qdf_mem_copy(bssid,
						     &peer_head->mac_addr.raw,
						     QDF_MAC_ADDR_SIZE);
					qdf_spin_unlock_bh(
						&peer_head->peer_info_lock);

					ucfg_pkt_capture_rx_msdu_process(
							bssid, head_msdu,
							peer->vdev->vdev_id,
							htt_pdev);
				}
			}
		} else if (offloaded_pkt) {
			ucfg_pkt_capture_rx_msdu_process(
						bssid, head_msdu,
						HTT_INVALID_VDEV,
						htt_pdev);

			ucfg_pkt_capture_rx_drop_offload_pkt(head_msdu);
			return;
		}
	}

	/* Pktlog */
	ol_rx_send_pktlog_event(pdev, peer, head_msdu, pktlog_bit);

	/*
	 * if this is an offload indication, peer id is carried in the
	 * rx buffer
	 */
	if (peer) {
		vdev = peer->vdev;
	} else {
		ol_txrx_dbg("Couldn't find peer from ID 0x%x", peer_id);
		while (head_msdu) {
			qdf_nbuf_t msdu = head_msdu;

			head_msdu = qdf_nbuf_next(head_msdu);
			TXRX_STATS_MSDU_INCR(pdev,
					     rx.dropped_peer_invalid, msdu);
			htt_rx_desc_frame_free(htt_pdev, msdu);
		}
		return;
	}

	/* Loop msdu to fill tstamp with tsf64 time in ol_rx_timestamp */
	ol_rx_timestamp_update(pdev, head_msdu, tail_msdu);

	peer->rx_opt_proc(vdev, peer, tid, head_msdu);
}
#endif

#ifdef CONNECTIVITY_PKTLOG
/**
 * ol_rx_pkt_dump_call() - updates status and
 * calls packetdump callback to log rx packet
 *
 * @msdu: rx packet
 * @peer_id: peer id
 * @status: status of rx packet
 *
 * This function is used to update the status of the rx packet
 * and then call the packetdump callback to log that packet.
 *
 * Return: None
 *
 */
void ol_rx_pkt_dump_call(
	qdf_nbuf_t msdu,
	uint8_t peer_id,
	uint8_t status)
{
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	ol_txrx_soc_handle soc_hdl = ol_txrx_soc_t_to_cdp_soc_t(soc);
	struct ol_txrx_peer_t *peer = NULL;
	ol_txrx_pktdump_cb packetdump_cb;
	ol_txrx_pdev_handle pdev;

	if (qdf_unlikely(!soc))
		return;

	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		return;
	}

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (!peer) {
		ol_txrx_dbg("peer with peer id %d is NULL", peer_id);
		return;
	}

	packetdump_cb = pdev->ol_rx_packetdump_cb;
	if (packetdump_cb &&
	    wlan_op_mode_sta == peer->vdev->opmode)
		packetdump_cb(soc_hdl, OL_TXRX_PDEV_ID, peer->vdev->vdev_id,
			      msdu, status, QDF_RX_DATA_PKT);
}
#endif

#ifdef WLAN_FULL_REORDER_OFFLOAD
/* the msdu_list passed here must be NULL terminated */
void
ol_rx_in_order_deliver(struct ol_txrx_vdev_t *vdev,
		       struct ol_txrx_peer_t *peer,
		       unsigned int tid, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu;

	msdu = msdu_list;
	/*
	 * Currently, this does not check each MSDU to see whether it requires
	 * special handling. MSDUs that need special handling (example: IGMP
	 * frames) should be sent via a separate HTT message. Also, this does
	 * not do rx->tx forwarding or filtering.
	 */

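	/*
	 * Per-MSDU bookkeeping only: record a DP trace entry and update the
	 * peer, error, and delivered-frame statistics; the actual delivery
	 * happens once for the whole list below.
	 */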
	while (msdu) {
		qdf_nbuf_t next = qdf_nbuf_next(msdu);

		DPTRACE(qdf_dp_trace(msdu,
			QDF_DP_TRACE_RX_TXRX_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			qdf_nbuf_data_addr(msdu),
			sizeof(qdf_nbuf_data(msdu)), QDF_RX));

		OL_RX_PEER_STATS_UPDATE(peer, msdu);
		OL_RX_ERR_STATISTICS_1(vdev->pdev, vdev, peer, rx_desc,
				       OL_RX_ERR_NONE);
		TXRX_STATS_MSDU_INCR(vdev->pdev, rx.delivered, msdu);

		msdu = next;
	}

	ol_txrx_frms_dump("rx delivering:",
			  vdev->pdev, msdu_list,
			  ol_txrx_frm_dump_tcp_seq | ol_txrx_frm_dump_contents,
			  0 /* don't print contents */);

	ol_rx_data_process(peer, msdu_list);
}
#endif

#ifndef CONFIG_HL_SUPPORT
void
ol_rx_offload_paddr_deliver_ind_handler(htt_pdev_handle htt_pdev,
					uint32_t msdu_count,
					uint32_t *msg_word)
{
	int vdev_id, peer_id, tid;
	qdf_nbuf_t head_buf, tail_buf, buf;
	struct ol_txrx_peer_t *peer;
	uint8_t fw_desc;
	int msdu_iter = 0;

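	/*
	 * Each iteration pops one offloaded MSDU chain (head_buf..tail_buf)
	 * from the message by physical address. If the peer is known, the
	 * chain is traced and delivered; otherwise every buffer in it is
	 * freed back to the rx descriptor pool.
	 */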
	while (msdu_count) {
		if (htt_rx_offload_paddr_msdu_pop_ll(
					htt_pdev, msg_word, msdu_iter,
					&vdev_id, &peer_id, &tid,
					&fw_desc, &head_buf,
					&tail_buf)) {
			msdu_iter++;
			msdu_count--;
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  "skip msg_word %pK, msdu #%d, continue next",
				  msg_word, msdu_iter);
			continue;
		}

		peer = ol_txrx_peer_find_by_id(htt_pdev->txrx_pdev, peer_id);
		if (peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(head_buf) = false;
			qdf_dp_trace_set_track(head_buf, QDF_RX);
			QDF_NBUF_CB_TX_PACKET_TRACK(head_buf) =
						QDF_NBUF_TX_PKT_DATA_TRACK;
			qdf_dp_trace_log_pkt(peer->vdev->vdev_id,
					     head_buf, QDF_RX,
					     QDF_TRACE_DEFAULT_PDEV_ID,
					     peer->vdev->qdf_opmode);
			DPTRACE(qdf_dp_trace(head_buf,
				QDF_DP_TRACE_RX_OFFLOAD_HTT_PACKET_PTR_RECORD,
				QDF_TRACE_DEFAULT_PDEV_ID,
				qdf_nbuf_data_addr(head_buf),
				sizeof(qdf_nbuf_data(head_buf)), QDF_RX));
			ol_rx_data_process(peer, head_buf);
		} else {
			buf = head_buf;
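			/*
			 * Unknown peer: free every buffer in the popped
			 * chain; tail_buf marks the last buffer, so stop
			 * after freeing it.
			 */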
			while (1) {
				qdf_nbuf_t next;

				next = qdf_nbuf_next(buf);
				htt_rx_desc_frame_free(htt_pdev, buf);
				if (buf == tail_buf)
					break;
				buf = next;
			}
		}
		msdu_iter++;
		msdu_count--;
	}
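	/* Replenish the rx buffer ring for the buffers consumed above */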
	htt_rx_msdu_buff_replenish(htt_pdev);
}
#endif

#ifdef FEATURE_MONITOR_MODE_SUPPORT
/**
 * ol_htt_mon_note_chan() - Update monitor channel information
 * @ppdev: handle to the physical device
 * @mon_ch: Monitor channel
 *
 * Return: None
 */
void ol_htt_mon_note_chan(struct cdp_pdev *ppdev, int mon_ch)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	htt_rx_mon_note_capture_channel(pdev->htt_pdev, mon_ch);
}
#endif

#ifdef NEVERDEFINED
/**
 * ol_ath_add_vow_extstats() - populate VoW extended stats in the given
 * network buffer
 * @pdev: handle to the htt pdev
 * @msdu: network buffer handle
 *
 * Return: None
 */
void ol_ath_add_vow_extstats(htt_pdev_handle pdev, qdf_nbuf_t msdu)
{
	/* FIX THIS:
	 * txrx should not be directly using data types (scn)
	 * that are internal to other modules.
	 */
	struct ol_ath_softc_net80211 *scn =
		(struct ol_ath_softc_net80211 *)pdev->ctrl_pdev;
	uint8_t *data, *l3_hdr, *bp;
	uint16_t ethertype;
	int offset;
	struct vow_extstats vowstats;

	if (scn->vow_extstats == 0)
		return;

	data = qdf_nbuf_data(msdu);

	offset = QDF_MAC_ADDR_SIZE * 2;
	l3_hdr = data + ETHERNET_HDR_LEN;
	ethertype = (data[offset] << 8) | data[offset + 1];
	if (ethertype == ETHERTYPE_IPV4) {
		offset = IPV4_HDR_OFFSET_PROTOCOL;
		if ((l3_hdr[offset] == IP_PROTOCOL_UDP) &&
		    (l3_hdr[0] == IP_VER4_N_NO_EXTRA_HEADERS)) {
			bp = data + EXT_HDR_OFFSET;

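			/*
			 * The 0x12 0x34 0x00 0x08 pattern below appears to be
			 * the marker for a VoW test stream's extension
			 * header; only such frames are stamped with stats.
			 */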
			if ((data[RTP_HDR_OFFSET] == UDP_PDU_RTP_EXT) &&
			    (bp[0] == 0x12) &&
			    (bp[1] == 0x34) &&
			    (bp[2] == 0x00) && (bp[3] == 0x08)) {
				/*
				 * Clear the UDP checksum so we do not have
				 * to recalculate it after filling in the
				 * status fields.
				 */
				data[UDP_CKSUM_OFFSET] = 0;
				data[(UDP_CKSUM_OFFSET + 1)] = 0;

				bp += IPERF3_DATA_OFFSET;

				htt_rx_get_vowext_stats(msdu, &vowstats);

				/* control channel RSSI */
				*bp++ = vowstats.rx_rssi_ctl0;
				*bp++ = vowstats.rx_rssi_ctl1;
				*bp++ = vowstats.rx_rssi_ctl2;

				/* rx rate info */
				*bp++ = vowstats.rx_bw;
				*bp++ = vowstats.rx_sgi;
				*bp++ = vowstats.rx_nss;

				*bp++ = vowstats.rx_rssi_comb;
				/* rsflags */
				*bp++ = vowstats.rx_rs_flags;

				/* time stamp, low 16 bits, big-endian */
				*bp++ = (uint8_t)
					((vowstats.rx_macTs & 0x0000ff00) >> 8);
				*bp++ = (uint8_t)
					(vowstats.rx_macTs & 0x0000ff);
				/* rx phy errors */
				*bp++ = (uint8_t)
					((scn->chan_stats.phy_err_cnt >> 8) &
					 0xff);
				*bp++ = (uint8_t)
					(scn->chan_stats.phy_err_cnt & 0xff);
				/* rx clear count */
				*bp++ = (uint8_t)
					((scn->mib_cycle_cnts.rx_clear_count >>
					  24) & 0xff);
				*bp++ = (uint8_t)
					((scn->mib_cycle_cnts.rx_clear_count >>
					  16) & 0xff);
				*bp++ = (uint8_t)
					((scn->mib_cycle_cnts.rx_clear_count >>
					  8) & 0xff);
				*bp++ = (uint8_t)
					(scn->mib_cycle_cnts.rx_clear_count &
					 0xff);
				/* rx cycle count */
				*bp++ = (uint8_t)
					((scn->mib_cycle_cnts.cycle_count >>
					  24) & 0xff);
				*bp++ = (uint8_t)
					((scn->mib_cycle_cnts.cycle_count >>
					  16) & 0xff);
				*bp++ = (uint8_t)
					((scn->mib_cycle_cnts.cycle_count >>
					  8) & 0xff);
				*bp++ = (uint8_t)
					(scn->mib_cycle_cnts.cycle_count &
					 0xff);

				*bp++ = vowstats.rx_ratecode;
				*bp++ = vowstats.rx_moreaggr;

				/* sequence number, big-endian */
				*bp++ = (uint8_t)
					((vowstats.rx_seqno >> 8) & 0xff);
				*bp++ = (uint8_t)
					(vowstats.rx_seqno & 0xff);
			}
		}
	}
}

#endif

#ifdef WLAN_CFR_ENABLE
void ol_rx_cfr_capture_msg_handler(qdf_nbuf_t htt_t2h_msg)
{
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	HTT_PEER_CFR_CAPTURE_MSG_TYPE cfr_type;
	struct htt_cfr_dump_compl_ind *cfr_dump;
	struct htt_cfr_dump_ind_type_1 cfr_ind;
	struct csi_cfr_header cfr_hdr = {};
	uint32_t mem_index, req_id, vdev_id;
	uint32_t *msg_word;
	uint8_t *mac_addr;

	msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);

	/* First payload word */
	msg_word++;
	cfr_dump = (struct htt_cfr_dump_compl_ind *)msg_word;
	cfr_type = cfr_dump->msg_type;
	if (cfr_type != HTT_PEER_CFR_CAPTURE_MSG_TYPE_1) {
		ol_txrx_err("Unsupported cfr msg type 0x%x", cfr_type);
		return;
	}

	/* Second payload word */
	msg_word++;
	req_id = HTT_T2H_CFR_DUMP_TYPE1_MEM_REQ_ID_GET(*msg_word);
	if (req_id != CFR_CAPTURE_HOST_MEM_REQ_ID) {
		ol_txrx_err("Invalid req id in cfr capture msg");
		return;
	}
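	/*
	 * Unpack the type-1 capture metadata into the CSI/CFR header handed
	 * to the CFR component: the second payload word carries the status,
	 * bandwidth, mode, and vdev fields, and the peer MAC address follows
	 * in the next words.
	 */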
	cfr_hdr.start_magic_num = 0xDEADBEAF;
	cfr_hdr.u.meta_v1.status = HTT_T2H_CFR_DUMP_TYPE1_STATUS_GET(
								*msg_word);
	cfr_hdr.u.meta_v1.capture_bw = HTT_T2H_CFR_DUMP_TYPE1_CAP_BW_GET(
								*msg_word);
	cfr_hdr.u.meta_v1.capture_mode = HTT_T2H_CFR_DUMP_TYPE1_MODE_GET(
								*msg_word);
	cfr_hdr.u.meta_v1.sts_count = HTT_T2H_CFR_DUMP_TYPE1_STS_GET(
								*msg_word);
	cfr_hdr.u.meta_v1.channel_bw = HTT_T2H_CFR_DUMP_TYPE1_CHAN_BW_GET(
								*msg_word);
	cfr_hdr.u.meta_v1.capture_type = HTT_T2H_CFR_DUMP_TYPE1_CAP_TYPE_GET(
								*msg_word);

	vdev_id = HTT_T2H_CFR_DUMP_TYPE1_VDEV_ID_GET(*msg_word);

	mac_addr = (uint8_t *)(msg_word + 1);
	qdf_mem_copy(cfr_hdr.u.meta_v1.peer_addr, mac_addr, QDF_MAC_ADDR_SIZE);

	cfr_ind = cfr_dump->htt_cfr_dump_compl_ind_type_1;

	cfr_hdr.u.meta_v1.prim20_chan = cfr_ind.chan.chan_mhz;
	cfr_hdr.u.meta_v1.center_freq1 = cfr_ind.chan.band_center_freq1;
	cfr_hdr.u.meta_v1.center_freq2 = cfr_ind.chan.band_center_freq2;
	cfr_hdr.u.meta_v1.phy_mode = cfr_ind.chan.chan_mode;
	cfr_hdr.u.meta_v1.length = cfr_ind.length;
	cfr_hdr.u.meta_v1.timestamp = cfr_ind.timestamp;

	mem_index = cfr_ind.index;

	ucfg_cfr_capture_data((void *)soc->psoc, vdev_id, &cfr_hdr, mem_index);
}
#endif
