1 /*
2 * Copyright (c) 2011, 2014-2019, 2021 The Linux Foundation. All rights reserved.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 /* standard header files */
20 #include <qdf_nbuf.h> /* qdf_nbuf_map */
21 #include <qdf_mem.h> /* qdf_mem_cmp */
22
23 /* external header files */
24 #include <ol_cfg.h> /* wlan_op_mode_ap, etc. */
25 #include <ol_htt_rx_api.h> /* htt_rx_msdu_desc_retrieve */
26 #include <cds_ieee80211_common.h> /* ieee80211_frame, etc. */
27
28 /* internal header files */
29 #include <ol_rx_fwd.h> /* our own defs */
30 #include <ol_rx.h> /* ol_rx_deliver */
31 #include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
32 #include <ol_tx.h>
33 #include <ol_txrx.h>
34
35 /*
36 * Porting from Ap11PrepareForwardedPacket.
37 * This routine is called when a RX data frame from an associated station is
38 * to be forwarded to another associated station. We will prepare the
39 * received packet so that it is suitable for transmission again.
40 * Check that this Packet is suitable for forwarding. If yes, then
41 * prepare the new 802.11 header.
42 */
static inline void ol_ap_fwd_check(struct ol_txrx_vdev_t *vdev, qdf_nbuf_t msdu)
{
	struct ieee80211_frame *hdr;
	unsigned char bssid_tmp[QDF_MAC_ADDR_SIZE];

	hdr = (struct ieee80211_frame *)(qdf_nbuf_data(msdu));
	TXRX_ASSERT1(hdr);

	/*
	 * Only a plain (non-QoS, subtype 0) ToDS data frame whose addr3
	 * (DA) is NOT this vdev's own MAC address needs its 802.11 header
	 * rewritten for forwarding.  Anything else — management/control
	 * frames, QoS data, FromDS frames, or frames destined for us —
	 * travels up the normal rx path untouched.
	 */
	if ((hdr->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
						IEEE80211_FC0_TYPE_DATA ||
	    (hdr->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) != 0x0 ||
	    (hdr->i_fc[1] & IEEE80211_FC1_DIR_TODS) != 1 ||
	    (hdr->i_fc[1] & IEEE80211_FC1_DIR_FROMDS) != 0 ||
	    qdf_mem_cmp(hdr->i_addr3, vdev->mac_addr.raw,
			QDF_MAC_ADDR_SIZE)) {
		ol_txrx_dbg("Exit | Unnecessary to adjust mac header");
		return;
	}

	/* Flip the direction bits: clear ToDS, set FromDS */
	hdr->i_fc[1] &= 0xfe;
	hdr->i_fc[1] |= 0x2;

	/*
	 * Rotate the addresses accordingly:
	 * (ToDs, addr1, RA=BSSID) -> (FrDs, addr2, TA=BSSID)
	 * (ToDs, addr2, SA)       -> (FrDs, addr3, SA)
	 * (ToDs, addr3, DA)       -> (FrDs, addr1, DA)
	 */
	memcpy(bssid_tmp, hdr->i_addr2, QDF_MAC_ADDR_SIZE);
	memcpy(hdr->i_addr2, hdr->i_addr1, QDF_MAC_ADDR_SIZE);
	memcpy(hdr->i_addr1, hdr->i_addr3, QDF_MAC_ADDR_SIZE);
	memcpy(hdr->i_addr3, bssid_tmp, QDF_MAC_ADDR_SIZE);
}
97
static inline void ol_rx_fwd_to_tx(struct ol_txrx_vdev_t *vdev, qdf_nbuf_t msdu)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	qdf_nbuf_t rejected;

	/* Native-wifi format frames may need their 802.11 header rewritten
	 * (ToDS -> FromDS) before being retransmitted.
	 */
	if (pdev->frame_format == wlan_frm_fmt_native_wifi)
		ol_ap_fwd_check(vdev, msdu);

	/* Hand tx a single-element list */
	qdf_nbuf_set_next(msdu, NULL);

	/* On HL targets the rx descriptor sits at the head of the buffer;
	 * strip it so the tx path starts at the payload.
	 */
	if (pdev->cfg.is_high_latency) {
		void *rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev,
							  msdu);

		qdf_nbuf_pull_head(msdu,
				   htt_rx_msdu_rx_desc_size_hl(pdev->htt_pdev,
							       rx_desc));
	}

	/* The rx path's control block contents would be misread by tx;
	 * wipe them before the buffer is re-interpreted.
	 */
	qdf_mem_zero(msdu->cb, sizeof(msdu->cb));
	/* update any cb field expected by OL_TX_SEND */

	rejected = OL_TX_SEND(vdev, msdu);
	if (rejected) {
		/*
		 * The frame was not accepted by the tx path.  We could
		 * queue it and retry later, but the simplest recovery
		 * is to discard it.
		 */
		qdf_nbuf_tx_free(rejected, QDF_NBUF_PKT_ERROR);
	}
}
137
/*
 * ol_rx_fwd_check() - walk an rx MSDU list, forwarding intra-BSS frames
 * back into the tx path and delivering the rest (or copies) to the OS.
 * @vdev: vdev on which the MSDUs were received
 * @peer: peer that sent the MSDUs
 * @tid: traffic identifier of the rx list (HTT_NON_QOS_TID for non-QoS)
 * @msdu_list: NULL-terminated chain of received MSDUs
 *
 * Frames the target marked "forward" are re-sent on the same vdev; frames
 * additionally marked "discard" skip OS delivery entirely, otherwise a
 * copy is forwarded and the original goes up the stack.
 */
void
ol_rx_fwd_check(struct ol_txrx_vdev_t *vdev,
		struct ol_txrx_peer_t *peer,
		unsigned int tid, qdf_nbuf_t msdu_list)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	/* frames destined for the OS are accumulated here, then delivered
	 * in one batch at the end
	 */
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	qdf_nbuf_t msdu;

	msdu = msdu_list;
	while (msdu) {
		struct ol_txrx_vdev_t *tx_vdev;
		void *rx_desc;
		/* byte offset of the payload past the HL rx descriptor;
		 * stays 0 for LL
		 */
		uint16_t off = 0;
		/*
		 * Remember the next list elem, because our processing
		 * may cause the MSDU to get linked into a different list.
		 */
		msdu_list = qdf_nbuf_next(msdu);

		rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, msdu);

		if (!vdev->disable_intrabss_fwd &&
		    htt_rx_msdu_forward(pdev->htt_pdev, rx_desc)) {
			/*
			 * Use the same vdev that received the frame to
			 * transmit the frame.
			 * This is exactly what we want for intra-BSS
			 * forwarding, like STA-to-STA forwarding and
			 * multicast echo.
			 * If this is an inter-BSS forwarding case (which is
			 * not currently supported), then the tx vdev is
			 * different from the rx vdev.
			 * On the LL host the vdevs are not actually used
			 * for tx, so it would still work to use the rx vdev
			 * rather than the tx vdev.
			 * For HL, the tx classification searches for the DA
			 * within the given vdev, so we would want to get the DA
			 * peer ID from the target, so we can locate
			 * the tx vdev.
			 */
			tx_vdev = vdev;
			/*
			 * Copying TID value of RX packet to forwarded
			 * packet if the tid is other than non qos tid.
			 * But for non qos tid fill invalid tid so that
			 * Fw will take care of filling proper tid.
			 */
			if (tid != HTT_NON_QOS_TID) {
				qdf_nbuf_set_tid(msdu, tid);
			} else {
				qdf_nbuf_set_tid(msdu,
						 QDF_NBUF_TX_EXT_TID_INVALID);
			}

			/* Drop when the tx path is low on descriptors */
			if (!ol_txrx_fwd_desc_thresh_check(vdev)) {
				/* Drop the packet*/
				htt_rx_msdu_desc_free(pdev->htt_pdev, msdu);
				TXRX_STATS_MSDU_LIST_INCR(
					pdev, tx.dropped.host_reject, msdu);
				/* add NULL terminator */
				qdf_nbuf_set_next(msdu, NULL);
				qdf_nbuf_tx_free(msdu,
						 QDF_NBUF_PKT_ERROR);
				msdu = msdu_list;
				continue;
			}

			/* HL buffers start with the rx descriptor; skip it
			 * when inspecting the payload below
			 */
			if (pdev->cfg.is_high_latency)
				off = htt_rx_msdu_rx_desc_size_hl(
							pdev->htt_pdev,
							rx_desc);

			/*
			 * In AP mode, drop EAPOL frames whose destination
			 * MAC is not this vdev — they must not be forwarded
			 * between stations.
			 */
			if (vdev->opmode == wlan_op_mode_ap &&
			    __qdf_nbuf_data_is_ipv4_eapol_pkt(
						qdf_nbuf_data(msdu) + off) &&
			    qdf_mem_cmp(qdf_nbuf_data(msdu) +
					QDF_NBUF_DEST_MAC_OFFSET,
					vdev->mac_addr.raw,
					QDF_MAC_ADDR_SIZE)) {
				TXRX_STATS_MSDU_LIST_INCR(
					pdev, tx.dropped.host_reject, msdu);
				qdf_nbuf_set_next(msdu, NULL);
				qdf_nbuf_tx_free(msdu, QDF_NBUF_PKT_ERROR);
				msdu = msdu_list;
				continue;
			}

			/*
			 * This MSDU needs to be forwarded to the tx path.
			 * Check whether it also needs to be sent to the OS
			 * shim, in which case we need to make a copy
			 * (or clone?).
			 */
			if (htt_rx_msdu_discard(pdev->htt_pdev, rx_desc)) {
				/* forward-only: hand the original buffer
				 * to tx and skip OS delivery
				 */
				htt_rx_msdu_desc_free(pdev->htt_pdev, msdu);
				ol_rx_fwd_to_tx(tx_vdev, msdu);
				msdu = NULL;	/* already handled this MSDU */
				tx_vdev->fwd_tx_packets++;
				vdev->fwd_rx_packets++;
				TXRX_STATS_ADD(pdev,
					 pub.rx.intra_bss_fwd.packets_fwd, 1);
			} else {
				/* forward a copy; the original still goes
				 * up to the OS below
				 */
				qdf_nbuf_t copy;

				copy = qdf_nbuf_copy(msdu);
				if (copy) {
					ol_rx_fwd_to_tx(tx_vdev, copy);
					tx_vdev->fwd_tx_packets++;
				}
				TXRX_STATS_ADD(pdev,
				   pub.rx.intra_bss_fwd.packets_stack_n_fwd, 1);
			}
		} else {
			TXRX_STATS_ADD(pdev,
				 pub.rx.intra_bss_fwd.packets_stack, 1);
		}
		if (msdu) {
			/* send this frame to the OS */
			OL_TXRX_LIST_APPEND(deliver_list_head,
					    deliver_list_tail, msdu);
		}
		msdu = msdu_list;
	}
	if (deliver_list_head) {
		/* add NULL terminator */
		qdf_nbuf_set_next(deliver_list_tail, NULL);
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
			ol_rx_in_order_deliver(vdev, peer, tid,
					       deliver_list_head);
		} else {
			ol_rx_deliver(vdev, peer, tid, deliver_list_head);
		}
	}
}
274
275 /*
276 * ol_get_intra_bss_fwd_pkts_count() - to get the total tx and rx packets
277 * that has been forwarded from txrx layer without going to upper layers.
278 * @soc_hdl: Datapath soc handle
279 * @vdev_id: vdev id
280 * @fwd_tx_packets: pointer to forwarded tx packets count parameter
281 * @fwd_rx_packets: pointer to forwarded rx packets count parameter
282 *
283 * Return: status -> A_OK - success, A_ERROR - failure
284 */
ol_get_intra_bss_fwd_pkts_count(struct cdp_soc_t * soc_hdl,uint8_t vdev_id,uint64_t * fwd_tx_packets,uint64_t * fwd_rx_packets)285 A_STATUS ol_get_intra_bss_fwd_pkts_count(struct cdp_soc_t *soc_hdl,
286 uint8_t vdev_id,
287 uint64_t *fwd_tx_packets,
288 uint64_t *fwd_rx_packets)
289 {
290 struct ol_txrx_vdev_t *vdev = NULL;
291
292 vdev = (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
293 if (!vdev)
294 return A_ERROR;
295
296 *fwd_tx_packets = vdev->fwd_tx_packets;
297 *fwd_rx_packets = vdev->fwd_rx_packets;
298 return A_OK;
299 }
300
301