/*
 * Copyright (c) 2011, 2013-2017, 2019-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_nbuf.h>           /* qdf_nbuf_t */

#include <ol_htt_rx_api.h>      /* htt_rx_pn_t, etc. */
#include <ol_ctrl_txrx_api.h>   /* ol_rx_err */

#include <ol_txrx_internal.h>   /* ol_rx_mpdu_list_next */
#include <ol_rx_pn.h>           /* our own defs */
#include <ol_rx_fwd.h>          /* ol_rx_fwd_check */
#include <ol_rx.h>              /* ol_rx_deliver */

/* add the MSDUs from this MPDU to the list of good frames */
#define ADD_MPDU_TO_LIST(head, tail, mpdu, mpdu_tail) do {		\
		if (!head) {						\
			head = mpdu;					\
		} else {						\
			qdf_nbuf_set_next(tail, mpdu);			\
		}							\
		tail = mpdu_tail;					\
	} while (0)

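/*
 * ol_rx_pn_cmp24() - compare a new 24-bit PN against the last-seen value
 *
 * Return: non-zero (replay) if the new PN is not greater than the old one,
 * or, when strict checking is requested, if it is not exactly old PN + 1.
 * The is_unicast and opmode arguments are unused for this PN size.
 */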
int ol_rx_pn_cmp24(union htt_rx_pn_t *new_pn,
		   union htt_rx_pn_t *old_pn, int is_unicast, int opmode,
		   bool strict_chk)
{
	if (strict_chk)
		return ((new_pn->pn24 & 0xffffff) - (old_pn->pn24 & 0xffffff)
			!= 1);
	else
		return ((new_pn->pn24 & 0xffffff) <= (old_pn->pn24 & 0xffffff));
}

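/*
 * ol_rx_pn_cmp48() - compare a new 48-bit PN against the last-seen value
 *
 * Return: non-zero (replay) if the new PN is not greater than the old one,
 * or, when strict checking is requested, if it is not exactly old PN + 1.
 * The is_unicast and opmode arguments are unused for this PN size.
 */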
int ol_rx_pn_cmp48(union htt_rx_pn_t *new_pn,
		   union htt_rx_pn_t *old_pn, int is_unicast, int opmode,
		   bool strict_chk)
{
	if (strict_chk)
		return ((new_pn->pn48 & 0xffffffffffffULL) -
			(old_pn->pn48 & 0xffffffffffffULL) != 1);
	else
		return ((new_pn->pn48 & 0xffffffffffffULL) <=
			(old_pn->pn48 & 0xffffffffffffULL));
}

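/*
 * ol_rx_pn_wapi_cmp() - compare a new 128-bit WAPI PN against the last-seen
 * value
 *
 * Return: non-zero (replay) if the new PN is not greater than the old one.
 * For unicast frames the PN parity is also checked: frames received in AP
 * mode must carry an even PN, frames received in other modes an odd PN.
 */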
int ol_rx_pn_wapi_cmp(union htt_rx_pn_t *new_pn,
		      union htt_rx_pn_t *old_pn, int is_unicast, int opmode,
		      bool strict_chk)
{
	int pn_is_replay = 0;

	/* TODO: strict check for WAPI is not implemented */
	if (new_pn->pn128[1] == old_pn->pn128[1])
		pn_is_replay = (new_pn->pn128[0] <= old_pn->pn128[0]);
	else
		pn_is_replay = (new_pn->pn128[1] < old_pn->pn128[1]);

	if (is_unicast) {
		if (opmode == wlan_op_mode_ap)
			pn_is_replay |= ((new_pn->pn128[0] & 0x1ULL) != 0);
		else
			pn_is_replay |= ((new_pn->pn128[0] & 0x1ULL) != 1);
	}
	return pn_is_replay;
}

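/*
 * ol_rx_pn_check_base() - filter an MPDU list by PN
 *
 * Walk the MPDUs in msdu_list and compare each MPDU's PN against the last
 * PN recorded for this peer/TID.  MPDUs that pass the check (or that are
 * not encrypted) are linked into an output list; MPDUs that fail are
 * reported to the control SW via ol_rx_err() and freed.
 *
 * Return: the list of MSDUs that passed the PN check
 */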
qdf_nbuf_t
ol_rx_pn_check_base(struct ol_txrx_vdev_t *vdev,
		    struct ol_txrx_peer_t *peer,
		    unsigned int tid, qdf_nbuf_t msdu_list, bool strict_chk)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	union htt_rx_pn_t *last_pn;
	qdf_nbuf_t out_list_head = NULL;
	qdf_nbuf_t out_list_tail = NULL;
	qdf_nbuf_t mpdu;
	int index;              /* unicast vs. multicast */
	int pn_len;
	void *rx_desc;
	int last_pn_valid;

	/* Make sure host pn check is not redundant */
	if ((qdf_atomic_read(&peer->fw_pn_check)) ||
		(vdev->opmode == wlan_op_mode_ibss)) {
		return msdu_list;
	}

	/* First, check whether the PN check applies */
	rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, msdu_list);
	qdf_assert(htt_rx_msdu_has_wlan_mcast_flag(pdev->htt_pdev, rx_desc));
	index = htt_rx_msdu_is_wlan_mcast(pdev->htt_pdev, rx_desc) ?
		txrx_sec_mcast : txrx_sec_ucast;
	pn_len = pdev->rx_pn[peer->security[index].sec_type].len;
	if (pn_len == 0)
		return msdu_list;

	last_pn_valid = peer->tids_last_pn_valid[tid];
	last_pn = &peer->tids_last_pn[tid];
	mpdu = msdu_list;
	while (mpdu) {
		qdf_nbuf_t mpdu_tail, next_mpdu;
		union htt_rx_pn_t new_pn;
		int pn_is_replay = 0;

		rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, mpdu);

		/*
		 * Find the last MSDU within this MPDU, and
		 * then find the first MSDU within the next MPDU.
		 */
		ol_rx_mpdu_list_next(pdev, mpdu, &mpdu_tail, &next_mpdu);

		/* Don't check the PN replay for non-encrypted frames */
		if (!htt_rx_mpdu_is_encrypted(pdev->htt_pdev, rx_desc)) {
			ADD_MPDU_TO_LIST(out_list_head, out_list_tail,
					 mpdu, mpdu_tail);
			mpdu = next_mpdu;
			continue;
		}

		/* retrieve PN from rx descriptor */
		htt_rx_mpdu_desc_pn(pdev->htt_pdev, rx_desc, &new_pn, pn_len);

		/* if there was no prior PN, there's nothing to check */
		if (last_pn_valid) {
			pn_is_replay =
				pdev->rx_pn[peer->security[index].sec_type].
				cmp(&new_pn, last_pn, index == txrx_sec_ucast,
				    vdev->opmode, strict_chk);
		} else {
			last_pn_valid = peer->tids_last_pn_valid[tid] = 1;
		}

		if (pn_is_replay) {
			qdf_nbuf_t msdu;
			static uint32_t last_pncheck_print_time /* = 0 */;
			uint32_t current_time_ms;

			/*
			 * This MPDU failed the PN check:
			 * 1.  notify the control SW of the PN failure
			 *     (so countermeasures can be taken, if necessary)
			 * 2.  Discard all the MSDUs from this MPDU.
			 */
			msdu = mpdu;
			current_time_ms =
				qdf_system_ticks_to_msecs(qdf_system_ticks());
			if (TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS <
			    (current_time_ms - last_pncheck_print_time)) {
				last_pncheck_print_time = current_time_ms;
				ol_txrx_warn(
				   "PN check failed - TID %d, peer %pK "
				   "("QDF_MAC_ADDR_FMT") %s\n"
				   "    old PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
				   "    new PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
				   "    new seq num = %d\n",
				   tid, peer,
				   QDF_MAC_ADDR_REF(peer->mac_addr.raw),
				   (index ==
				    txrx_sec_ucast) ? "ucast" : "mcast",
				   last_pn->pn128[1], last_pn->pn128[0],
				   last_pn->pn128[0] & 0xffffffffffffULL,
				   new_pn.pn128[1], new_pn.pn128[0],
				   new_pn.pn128[0] & 0xffffffffffffULL,
				   htt_rx_mpdu_desc_seq_num(pdev->htt_pdev,
							    rx_desc, false));
			} else {
				ol_txrx_dbg(
				   "PN check failed - TID %d, peer %pK "
				   "("QDF_MAC_ADDR_FMT") %s\n"
				   "    old PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
				   "    new PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
				   "    new seq num = %d\n",
				   tid, peer,
				   QDF_MAC_ADDR_REF(peer->mac_addr.raw),
				   (index ==
				    txrx_sec_ucast) ? "ucast" : "mcast",
				   last_pn->pn128[1], last_pn->pn128[0],
				   last_pn->pn128[0] & 0xffffffffffffULL,
				   new_pn.pn128[1], new_pn.pn128[0],
				   new_pn.pn128[0] & 0xffffffffffffULL,
				   htt_rx_mpdu_desc_seq_num(pdev->htt_pdev,
							    rx_desc, false));
			}
#if defined(ENABLE_RX_PN_TRACE)
			ol_rx_pn_trace_display(pdev, 1);
#endif /* ENABLE_RX_PN_TRACE */
			ol_rx_err(pdev->ctrl_pdev,
				  vdev->vdev_id, peer->mac_addr.raw, tid,
				  htt_rx_mpdu_desc_tsf32(pdev->htt_pdev,
							 rx_desc), OL_RX_ERR_PN,
				  mpdu, NULL, 0);
			/* free all MSDUs within this MPDU */
			do {
				qdf_nbuf_t next_msdu;

				OL_RX_ERR_STATISTICS_1(pdev, vdev, peer,
						       rx_desc, OL_RX_ERR_PN);
				next_msdu = qdf_nbuf_next(msdu);
				htt_rx_desc_frame_free(pdev->htt_pdev, msdu);
				if (msdu == mpdu_tail)
					break;
				msdu = next_msdu;
			} while (1);
		} else {
			ADD_MPDU_TO_LIST(out_list_head, out_list_tail,
					 mpdu, mpdu_tail);
			/*
			 * Remember the new PN.
			 * For simplicity, just do 2 64-bit word copies to
			 * cover the worst case (WAPI), regardless of the length
			 * of the PN.
			 * This is more efficient than doing a conditional
			 * branch to copy only the relevant portion.
			 *
			 * An IWNCOM AP sends one packet with the old PN after
			 * a USK rekey; don't update last_pn when receiving
			 * that packet, or the PN check will fail for later
			 * packets.
			 */
			if ((peer->security[index].sec_type
				== htt_sec_type_wapi) &&
			    (peer->tids_rekey_flag[tid] == 1) &&
			    (index == txrx_sec_ucast)) {
				peer->tids_rekey_flag[tid] = 0;
			} else {
				last_pn->pn128[0] = new_pn.pn128[0];
				last_pn->pn128[1] = new_pn.pn128[1];
				OL_RX_PN_TRACE_ADD(pdev, peer, tid, rx_desc);
			}
		}

		mpdu = next_mpdu;
	}
	/* make sure the list is null-terminated */
	if (out_list_tail)
		qdf_nbuf_set_next(out_list_tail, NULL);

	return out_list_head;
}

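/*
 * ol_rx_pn_check() - run the PN check, then the rx forwarding check
 *
 * Frames that pass the PN check are handed to ol_rx_fwd_check(), which
 * handles any intra-BSS forwarding before the frames are delivered.
 */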
void
ol_rx_pn_check(struct ol_txrx_vdev_t *vdev,
	       struct ol_txrx_peer_t *peer, unsigned int tid,
	       qdf_nbuf_t msdu_list)
{
	msdu_list = ol_rx_pn_check_base(vdev, peer, tid, msdu_list, false);
	ol_rx_fwd_check(vdev, peer, tid, msdu_list);
}

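/*
 * ol_rx_pn_check_only() - run the PN check, then deliver the frames
 *
 * Same PN filtering as ol_rx_pn_check(), but the surviving frames are
 * delivered directly, skipping the rx forwarding check.
 */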
void
ol_rx_pn_check_only(struct ol_txrx_vdev_t *vdev,
		    struct ol_txrx_peer_t *peer,
		    unsigned int tid, qdf_nbuf_t msdu_list)
{
	msdu_list = ol_rx_pn_check_base(vdev, peer, tid, msdu_list, false);
	ol_rx_deliver(vdev, peer, tid, msdu_list);
}

#if defined(ENABLE_RX_PN_TRACE)

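/*
 * ol_rx_pn_trace_attach() - allocate the circular rx PN trace log
 * (1 << TXRX_RX_PN_TRACE_SIZE_LOG2 entries)
 */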
A_STATUS ol_rx_pn_trace_attach(ol_txrx_pdev_handle pdev)
{
	int num_elems;

	num_elems = 1 << TXRX_RX_PN_TRACE_SIZE_LOG2;
	pdev->rx_pn_trace.idx = 0;
	pdev->rx_pn_trace.cnt = 0;
	pdev->rx_pn_trace.mask = num_elems - 1;
	pdev->rx_pn_trace.data =
		qdf_mem_malloc(sizeof(*pdev->rx_pn_trace.data) * num_elems);
	if (!pdev->rx_pn_trace.data)
		return A_NO_MEMORY;
	return A_OK;
}

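/* ol_rx_pn_trace_detach() - free the rx PN trace log */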
void ol_rx_pn_trace_detach(ol_txrx_pdev_handle pdev)
{
	qdf_mem_free(pdev->rx_pn_trace.data);
}

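/*
 * ol_rx_pn_trace_add() - record one entry (peer, TID, sequence number,
 * unicast flag, and PN LSBs) in the circular rx PN trace log
 */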
void
ol_rx_pn_trace_add(struct ol_txrx_pdev_t *pdev,
		   struct ol_txrx_peer_t *peer, uint16_t tid, void *rx_desc)
{
	uint32_t idx = pdev->rx_pn_trace.idx;
	union htt_rx_pn_t pn;
	uint32_t pn32;
	uint16_t seq_num;
	uint8_t unicast;

	htt_rx_mpdu_desc_pn(pdev->htt_pdev, rx_desc, &pn, 48);
	pn32 = pn.pn48 & 0xffffffff;
	seq_num = htt_rx_mpdu_desc_seq_num(pdev->htt_pdev, rx_desc, false);
	unicast = !htt_rx_msdu_is_wlan_mcast(pdev->htt_pdev, rx_desc);

	pdev->rx_pn_trace.data[idx].peer = peer;
	pdev->rx_pn_trace.data[idx].tid = tid;
	pdev->rx_pn_trace.data[idx].seq_num = seq_num;
	pdev->rx_pn_trace.data[idx].unicast = unicast;
	pdev->rx_pn_trace.data[idx].pn32 = pn32;
	pdev->rx_pn_trace.cnt++;
	idx++;
	pdev->rx_pn_trace.idx = idx & pdev->rx_pn_trace.mask;
}

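/*
 * ol_rx_pn_trace_display() - dump the rx PN trace log
 *
 * If just_once is set, only the first invocation prints anything.
 */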
void ol_rx_pn_trace_display(ol_txrx_pdev_handle pdev, int just_once)
{
	static int print_count /* = 0 */;
	uint32_t i, start, end;
	uint64_t cnt;
	int elems;
	int limit = 0;          /* move this to the arg list? */

	if (print_count != 0 && just_once)
		return;

	print_count++;

	end = pdev->rx_pn_trace.idx;
	if (pdev->rx_pn_trace.cnt <= pdev->rx_pn_trace.mask) {
		/* trace log has not yet wrapped around - start at the top */
		start = 0;
		cnt = 0;
	} else {
		start = end;
		cnt = pdev->rx_pn_trace.cnt - (pdev->rx_pn_trace.mask + 1);
	}
	elems = (end - 1 - start) & pdev->rx_pn_trace.mask;
	if (limit > 0 && elems > limit) {
		int delta;

		delta = elems - limit;
		start += delta;
		start &= pdev->rx_pn_trace.mask;
		cnt += delta;
	}

	i = start;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "                                 seq     PN");
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "   count  idx    peer   tid uni  num    LSBs");
	do {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "  %6lld %4d  %pK %2d   %d %4d %8d",
			  cnt, i,
			  pdev->rx_pn_trace.data[i].peer,
			  pdev->rx_pn_trace.data[i].tid,
			  pdev->rx_pn_trace.data[i].unicast,
			  pdev->rx_pn_trace.data[i].seq_num,
			  pdev->rx_pn_trace.data[i].pn32);
		cnt++;
		i++;
		i &= pdev->rx_pn_trace.mask;
	} while (i != end);
}
#endif /* ENABLE_RX_PN_TRACE */