1 /*
2 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
3 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18 #include <dp_types.h>
19 #include <wlan_dp_main.h>
20 #include <wlan_dp_fisa_rx.h>
21 #include "hal_rx_flow.h"
22 #include "dp_htt.h"
23 #include "dp_internal.h"
24 #include "hif.h"
25
26 static void dp_rx_fisa_flush_flow_wrap(struct dp_fisa_rx_sw_ft *sw_ft);
27
28 /*
29 * Used by FW to route RX packets to host REO2SW1 ring if IPA hits
30 * RX back pressure.
31 */
32 #define REO_DEST_IND_IPA_REROUTE 2
33
34 #if defined(FISA_DEBUG_ENABLE)
35 /**
36 * hex_dump_skb_data() - Helper function to dump skb while debugging
37 * @nbuf: Nbuf to be dumped
38 * @dump: enable/disable dumping
39 *
40 * Return: NONE
41 */
42 static void hex_dump_skb_data(qdf_nbuf_t nbuf, bool dump)
43 {
44 qdf_nbuf_t next_nbuf;
45 int i = 0;
46
47 if (!dump)
48 return;
49
50 if (!nbuf)
51 return;
52
53 dp_fisa_debug("%ps: skb: %pK skb->next:%pK frag_list %pK skb->data:%pK len %d data_len %d",
54 (void *)QDF_RET_IP, nbuf, qdf_nbuf_next(nbuf),
55 qdf_nbuf_get_ext_list(nbuf), qdf_nbuf_data(nbuf),
56 qdf_nbuf_len(nbuf), qdf_nbuf_get_only_data_len(nbuf));
57 QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
58 nbuf->data, 64);
59
60 next_nbuf = qdf_nbuf_get_ext_list(nbuf);
61 while (next_nbuf) {
62 dp_fisa_debug("%d nbuf:%pK nbuf->next:%pK nbuf->data:%pK len %d",
63 i, next_nbuf, qdf_nbuf_next(next_nbuf),
64 qdf_nbuf_data(next_nbuf),
65 qdf_nbuf_len(next_nbuf));
66 QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
67 qdf_nbuf_data(next_nbuf), 64);
68 next_nbuf = qdf_nbuf_next(next_nbuf);
69 i++;
70 }
71 }
72
73 /**
74 * dump_tlvs() - Helper function to dump TLVs of msdu
75 * @hal_soc_hdl: Handle to TLV functions
76 * @buf: Pointer to TLV header
77 * @dbg_level: level control output of TLV dump
78 *
79 * Return: NONE
80 */
81 static void dump_tlvs(hal_soc_handle_t hal_soc_hdl, uint8_t *buf,
82 uint8_t dbg_level)
83 {
84 uint32_t fisa_aggr_count, fisa_timeout, cumulat_l4_csum, cumulat_ip_len;
85 int flow_aggr_cont;
86
87 hal_rx_dump_pkt_tlvs(hal_soc_hdl, buf, dbg_level);
88
89 flow_aggr_cont = hal_rx_get_fisa_flow_agg_continuation(hal_soc_hdl,
90 buf);
91 fisa_aggr_count = hal_rx_get_fisa_flow_agg_count(hal_soc_hdl, buf);
92 fisa_timeout = hal_rx_get_fisa_timeout(hal_soc_hdl, buf);
93 cumulat_l4_csum = hal_rx_get_fisa_cumulative_l4_checksum(hal_soc_hdl,
94 buf);
95 cumulat_ip_len = hal_rx_get_fisa_cumulative_ip_length(hal_soc_hdl, buf);
96
97 dp_fisa_debug("flow_aggr_cont %d, fisa_timeout %d, fisa_aggr_count %d, cumulat_l4_csum %d, cumulat_ip_len %d",
98 flow_aggr_cont, fisa_timeout, fisa_aggr_count,
99 cumulat_l4_csum, cumulat_ip_len);
100 }
101 #else
102 static void hex_dump_skb_data(qdf_nbuf_t nbuf, bool dump)
103 {
104 }
105
106 static void dump_tlvs(hal_soc_handle_t hal_soc_hdl, uint8_t *buf,
107 uint8_t dbg_level)
108 {
109 }
110 #endif
111
112 #ifdef WLAN_SUPPORT_RX_FISA_HIST
113 static
114 void dp_fisa_record_pkt(struct dp_fisa_rx_sw_ft *fisa_flow, qdf_nbuf_t nbuf,
115 uint8_t *rx_tlv_hdr, uint16_t tlv_size)
116 {
117 uint32_t index;
118 uint8_t *tlv_hist_ptr;
119
120 if (!rx_tlv_hdr || !fisa_flow || !fisa_flow->pkt_hist.tlv_hist)
121 return;
122
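/* pkt_hist is a circular buffer: wrap the write index at FISA_FLOW_MAX_AGGR_COUNT entries */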
123 index = fisa_flow->pkt_hist.idx++ % FISA_FLOW_MAX_AGGR_COUNT;
124
125 fisa_flow->pkt_hist.ts_hist[index] = qdf_get_log_timestamp();
126 tlv_hist_ptr = fisa_flow->pkt_hist.tlv_hist + (index * tlv_size);
127 qdf_mem_copy(tlv_hist_ptr, rx_tlv_hdr, tlv_size);
128 }
129 #else
130 static
131 void dp_fisa_record_pkt(struct dp_fisa_rx_sw_ft *fisa_flow, qdf_nbuf_t nbuf,
132 uint8_t *rx_tlv_hdr, uint16_t tlv_size)
133 {
134 }
135
136 #endif
137
138 /**
139 * wlan_dp_nbuf_skip_rx_pkt_tlv() - Function to skip the TLVs and
140 * mac header from msdu
141 * @dp_ctx: DP component handle
142 * @rx_fst: FST handle
143 * @nbuf: msdu for which TLVs have to be skipped
144 *
145 * Return: None
146 */
147 static inline void
148 wlan_dp_nbuf_skip_rx_pkt_tlv(struct wlan_dp_psoc_context *dp_ctx,
149 struct dp_rx_fst *rx_fst, qdf_nbuf_t nbuf)
150 {
151 uint8_t *rx_tlv_hdr;
152 uint32_t l2_hdr_offset;
153
154 rx_tlv_hdr = qdf_nbuf_data(nbuf);
155 l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(dp_ctx->hal_soc,
156 rx_tlv_hdr);
157 qdf_nbuf_pull_head(nbuf, rx_fst->rx_pkt_tlv_size + l2_hdr_offset);
158 }
159
160 static bool
161 dp_rx_fisa_should_bypass(struct cdp_rx_flow_tuple_info *flow_tuple_info)
162 {
163 if (flow_tuple_info->dest_port == DNS_SERVER_PORT ||
164 flow_tuple_info->src_port == DNS_SERVER_PORT)
165 return true;
166
167 return false;
168 }
169
170 static bool
171 dp_fisa_is_ipsec_connection(struct cdp_rx_flow_tuple_info *flow_tuple_info)
172 {
173 if (flow_tuple_info->dest_port == IPSEC_PORT ||
174 flow_tuple_info->dest_port == IPSEC_NAT_PORT ||
175 flow_tuple_info->src_port == IPSEC_PORT ||
176 flow_tuple_info->src_port == IPSEC_NAT_PORT)
177 return true;
178
179 return false;
180 }
181
182 /**
183 * wlan_dp_get_flow_tuple_from_nbuf() - Get the flow tuple from msdu
184 * @dp_ctx: DP component handle
185 * @flow_tuple_info: return argument where the flow is populated
186 * @nbuf: msdu from which flow tuple is extracted.
187 * @rx_tlv_hdr: Pointer to msdu TLVs
188 *
189 * Return: None
190 */
191 static void
192 wlan_dp_get_flow_tuple_from_nbuf(struct wlan_dp_psoc_context *dp_ctx,
193 struct cdp_rx_flow_tuple_info *flow_tuple_info,
194 qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
195 {
196 struct dp_rx_fst *rx_fst = dp_ctx->rx_fst;
197 qdf_net_iphdr_t *iph;
198 qdf_net_tcphdr_t *tcph;
199 uint32_t ip_hdr_offset;
200 uint32_t tcp_hdr_offset;
201 uint32_t l2_hdr_offset =
202 hal_rx_msdu_end_l3_hdr_padding_get(dp_ctx->hal_soc,
203 rx_tlv_hdr);
204
205 hal_rx_get_l3_l4_offsets(dp_ctx->hal_soc, rx_tlv_hdr,
206 &ip_hdr_offset, &tcp_hdr_offset);
207 flow_tuple_info->tuple_populated = true;
208
209 qdf_nbuf_pull_head(nbuf, rx_fst->rx_pkt_tlv_size + l2_hdr_offset);
210
211 iph = (qdf_net_iphdr_t *)(qdf_nbuf_data(nbuf) + ip_hdr_offset);
212 tcph = (qdf_net_tcphdr_t *)(qdf_nbuf_data(nbuf) + ip_hdr_offset +
213 tcp_hdr_offset);
214
215 flow_tuple_info->dest_ip_31_0 = qdf_ntohl(iph->ip_daddr);
216 flow_tuple_info->dest_ip_63_32 = 0;
217 flow_tuple_info->dest_ip_95_64 = 0;
218 flow_tuple_info->dest_ip_127_96 =
219 HAL_IP_DA_SA_PREFIX_IPV4_COMPATIBLE_IPV6;
220
221 flow_tuple_info->src_ip_31_0 = qdf_ntohl(iph->ip_saddr);
222 flow_tuple_info->src_ip_63_32 = 0;
223 flow_tuple_info->src_ip_95_64 = 0;
224 flow_tuple_info->src_ip_127_96 =
225 HAL_IP_DA_SA_PREFIX_IPV4_COMPATIBLE_IPV6;
226
227 flow_tuple_info->dest_port = qdf_ntohs(tcph->dest);
228 flow_tuple_info->src_port = qdf_ntohs(tcph->source);
229 if (dp_fisa_is_ipsec_connection(flow_tuple_info))
230 flow_tuple_info->is_exception = 1;
231 else
232 flow_tuple_info->is_exception = 0;
233
234 flow_tuple_info->bypass_fisa =
235 dp_rx_fisa_should_bypass(flow_tuple_info);
236
237 flow_tuple_info->l4_protocol = iph->ip_proto;
238 dp_fisa_debug("l4_protocol %d", flow_tuple_info->l4_protocol);
239
240 qdf_nbuf_push_head(nbuf, rx_fst->rx_pkt_tlv_size + l2_hdr_offset);
241
242 dp_fisa_debug("head_skb: %pK head_skb->next:%pK head_skb->data:%pK len %d data_len %d",
243 nbuf, qdf_nbuf_next(nbuf), qdf_nbuf_data(nbuf),
244 qdf_nbuf_len(nbuf), qdf_nbuf_get_only_data_len(nbuf));
245 }
246
247 /**
248 * dp_rx_fisa_setup_hw_fse() - Populate flow so as to update DDR flow table
249 * @fisa_hdl: Handle to fisa context
250 * @hashed_flow_idx: Index to flow table
251 * @rx_flow_info: tuple to be populated in flow table
252 * @flow_steer_info: REO index to which flow to be steered
253 *
254 * Return: Pointer to DDR flow table entry
255 */
256 static void *
257 dp_rx_fisa_setup_hw_fse(struct dp_rx_fst *fisa_hdl,
258 uint32_t hashed_flow_idx,
259 struct cdp_rx_flow_tuple_info *rx_flow_info,
260 uint32_t flow_steer_info)
261 {
262 struct hal_rx_flow flow;
263 void *hw_fse;
264
265 flow.reo_destination_indication = flow_steer_info;
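/* Placeholder metadata for the DDR-based FSE; the CMEM path programs a real meta counter instead */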
266 flow.fse_metadata = 0xDEADBEEF;
267 flow.tuple_info.dest_ip_127_96 = rx_flow_info->dest_ip_127_96;
268 flow.tuple_info.dest_ip_95_64 = rx_flow_info->dest_ip_95_64;
269 flow.tuple_info.dest_ip_63_32 = rx_flow_info->dest_ip_63_32;
270 flow.tuple_info.dest_ip_31_0 = rx_flow_info->dest_ip_31_0;
271 flow.tuple_info.src_ip_127_96 = rx_flow_info->src_ip_127_96;
272 flow.tuple_info.src_ip_95_64 = rx_flow_info->src_ip_95_64;
273 flow.tuple_info.src_ip_63_32 = rx_flow_info->src_ip_63_32;
274 flow.tuple_info.src_ip_31_0 = rx_flow_info->src_ip_31_0;
275 flow.tuple_info.dest_port = rx_flow_info->dest_port;
276 flow.tuple_info.src_port = rx_flow_info->src_port;
277 flow.tuple_info.l4_protocol = rx_flow_info->l4_protocol;
278 flow.reo_destination_handler = HAL_RX_FSE_REO_DEST_FT;
279 hw_fse = hal_rx_flow_setup_fse(fisa_hdl->dp_ctx->hal_soc,
280 fisa_hdl->hal_rx_fst, hashed_flow_idx,
281 &flow);
282
283 return hw_fse;
284 }
285
286 #ifdef DP_FT_LOCK_HISTORY
287 struct dp_ft_lock_history ft_lock_hist[MAX_REO_DEST_RINGS];
288
289 /**
290 * dp_rx_fisa_record_ft_lock_event() - Record FT lock/unlock events
291 * @reo_id: REO ID
292 * @func: caller function
293 * @type: lock/unlock event type
294 *
295 * Return: None
296 */
297 static void dp_rx_fisa_record_ft_lock_event(uint8_t reo_id, const char *func,
298 enum dp_ft_lock_event_type type)
299 {
300 struct dp_ft_lock_history *lock_hist;
301 struct dp_ft_lock_record *record;
302 uint32_t record_idx;
303
304 if (reo_id >= MAX_REO_DEST_RINGS)
305 return;
306
307 lock_hist = &ft_lock_hist[reo_id];
308 record_idx = lock_hist->record_idx % DP_FT_LOCK_MAX_RECORDS;
309 lock_hist->record_idx++;
310
311 record = &lock_hist->ft_lock_rec[record_idx];
312
313 record->func = func;
314 record->cpu_id = qdf_get_cpu();
315 record->timestamp = qdf_get_log_timestamp();
316 record->type = type;
317 }
318
319 /**
320 * __dp_rx_fisa_acquire_ft_lock() - Acquire lock which protects SW FT entries
321 * @fisa_hdl: Handle to fisa context
322 * @reo_id: REO ID
323 * @func: calling function name
324 *
325 * Return: None
326 */
327 static inline void
328 __dp_rx_fisa_acquire_ft_lock(struct dp_rx_fst *fisa_hdl,
329 uint8_t reo_id, const char *func)
330 {
331 if (!fisa_hdl->flow_deletion_supported)
332 return;
333
334 qdf_spin_lock_bh(&fisa_hdl->dp_rx_sw_ft_lock[reo_id]);
335 dp_rx_fisa_record_ft_lock_event(reo_id, func, DP_FT_LOCK_EVENT);
336 }
337
338 /**
339 * __dp_rx_fisa_release_ft_lock() - Release lock which protects SW FT entries
340 * @fisa_hdl: Handle to fisa context
341 * @reo_id: REO ID
342 * @func: calling function name
343 *
344 * Return: None
345 */
346 static inline void
347 __dp_rx_fisa_release_ft_lock(struct dp_rx_fst *fisa_hdl,
348 uint8_t reo_id, const char *func)
349 {
350 if (!fisa_hdl->flow_deletion_supported)
351 return;
352
353 qdf_spin_unlock_bh(&fisa_hdl->dp_rx_sw_ft_lock[reo_id]);
354 dp_rx_fisa_record_ft_lock_event(reo_id, func, DP_FT_UNLOCK_EVENT);
355 }
356
357 #define dp_rx_fisa_acquire_ft_lock(fisa_hdl, reo_id) \
358 __dp_rx_fisa_acquire_ft_lock(fisa_hdl, reo_id, __func__)
359
360 #define dp_rx_fisa_release_ft_lock(fisa_hdl, reo_id) \
361 __dp_rx_fisa_release_ft_lock(fisa_hdl, reo_id, __func__)
362
363 #else
364 /**
365 * dp_rx_fisa_acquire_ft_lock() - Acquire lock which protects SW FT entries
366 * @fisa_hdl: Handle to fisa context
367 * @reo_id: REO ID
368 *
369 * Return: None
370 */
371 static inline void
372 dp_rx_fisa_acquire_ft_lock(struct dp_rx_fst *fisa_hdl, uint8_t reo_id)
373 {
374 if (fisa_hdl->flow_deletion_supported)
375 qdf_spin_lock_bh(&fisa_hdl->dp_rx_sw_ft_lock[reo_id]);
376 }
377
378 /**
379 * dp_rx_fisa_release_ft_lock() - Release lock which protects SW FT entries
380 * @fisa_hdl: Handle to fisa context
381 * @reo_id: REO ID
382 *
383 * Return: None
384 */
385 static inline void
386 dp_rx_fisa_release_ft_lock(struct dp_rx_fst *fisa_hdl, uint8_t reo_id)
387 {
388 if (fisa_hdl->flow_deletion_supported)
389 qdf_spin_unlock_bh(&fisa_hdl->dp_rx_sw_ft_lock[reo_id]);
390 }
391 #endif /* DP_FT_LOCK_HISTORY */
392
393 /**
394 * dp_rx_fisa_setup_cmem_fse() - Setup the flow search entry in HW CMEM
395 * @fisa_hdl: Handle to fisa context
396 * @hashed_flow_idx: Index to flow table
397 * @rx_flow_info: tuple to be populated in flow table
398 * @flow_steer_info: REO index to which flow to be steered
399 *
400 * Return: Offset to the FSE entry in CMEM
401 */
402 static uint32_t
403 dp_rx_fisa_setup_cmem_fse(struct dp_rx_fst *fisa_hdl, uint32_t hashed_flow_idx,
404 struct cdp_rx_flow_tuple_info *rx_flow_info,
405 uint32_t flow_steer_info)
406 {
407 struct dp_fisa_rx_sw_ft *sw_ft_entry;
408 struct hal_rx_flow flow;
409
410 sw_ft_entry = &(((struct dp_fisa_rx_sw_ft *)
411 fisa_hdl->base)[hashed_flow_idx]);
412 sw_ft_entry->metadata = ++fisa_hdl->meta_counter;
413
414 flow.reo_destination_indication = flow_steer_info;
415 flow.fse_metadata = sw_ft_entry->metadata;
416 flow.tuple_info.dest_ip_127_96 = rx_flow_info->dest_ip_127_96;
417 flow.tuple_info.dest_ip_95_64 = rx_flow_info->dest_ip_95_64;
418 flow.tuple_info.dest_ip_63_32 = rx_flow_info->dest_ip_63_32;
419 flow.tuple_info.dest_ip_31_0 = rx_flow_info->dest_ip_31_0;
420 flow.tuple_info.src_ip_127_96 = rx_flow_info->src_ip_127_96;
421 flow.tuple_info.src_ip_95_64 = rx_flow_info->src_ip_95_64;
422 flow.tuple_info.src_ip_63_32 = rx_flow_info->src_ip_63_32;
423 flow.tuple_info.src_ip_31_0 = rx_flow_info->src_ip_31_0;
424 flow.tuple_info.dest_port = rx_flow_info->dest_port;
425 flow.tuple_info.src_port = rx_flow_info->src_port;
426 flow.tuple_info.l4_protocol = rx_flow_info->l4_protocol;
427 flow.reo_destination_handler = HAL_RX_FSE_REO_DEST_FT;
428
429 return hal_rx_flow_setup_cmem_fse(fisa_hdl->dp_ctx->hal_soc,
430 fisa_hdl->cmem_ba, hashed_flow_idx,
431 &flow);
432 }
433
434 static inline
435 struct wlan_dp_intf *dp_fisa_rx_get_dp_intf_for_vdev(struct dp_vdev *vdev)
436 {
437 struct wlan_dp_link *dp_link =
438 (struct wlan_dp_link *)vdev->osif_vdev;
439
440 /* dp_link cannot be invalid if vdev is present */
441 return dp_link->dp_intf;
442 }
443
444 /**
445 * dp_rx_fisa_update_sw_ft_entry() - Helper function to update SW FT entry fields
446 * @sw_ft_entry: Pointer to software flow table entry
447 * @flow_hash: flow_hash for the flow
448 * @vdev: dp_vdev to save in the FT, used later when flushing the flow
449 * @dp_ctx: DP component handle
450 * @flow_id: Flow ID of the flow
451 *
452 * Return: NONE
453 */
454 static void dp_rx_fisa_update_sw_ft_entry(struct dp_fisa_rx_sw_ft *sw_ft_entry,
455 uint32_t flow_hash,
456 struct dp_vdev *vdev,
457 struct wlan_dp_psoc_context *dp_ctx,
458 uint32_t flow_id)
459 {
460 sw_ft_entry->flow_hash = flow_hash;
461 sw_ft_entry->flow_id = flow_id;
462 sw_ft_entry->vdev_id = vdev->vdev_id;
463 sw_ft_entry->vdev = vdev;
464 sw_ft_entry->dp_intf = dp_fisa_rx_get_dp_intf_for_vdev(vdev);
465 sw_ft_entry->dp_ctx = dp_ctx;
466 }
467
468 /**
469 * is_same_flow() - Function to compare flow tuple to decide if they match
470 * @tuple1: flow tuple 1
471 * @tuple2: flow tuple 2
472 *
473 * Return: true if they match, false if they differ
474 */
475 static bool is_same_flow(struct cdp_rx_flow_tuple_info *tuple1,
476 struct cdp_rx_flow_tuple_info *tuple2)
477 {
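/* XOR corresponding tuple fields and OR the results: any non-zero bit means the flows differ */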
478 if ((tuple1->src_port ^ tuple2->src_port) |
479 (tuple1->dest_port ^ tuple2->dest_port) |
480 (tuple1->src_ip_31_0 ^ tuple2->src_ip_31_0) |
481 (tuple1->src_ip_63_32 ^ tuple2->src_ip_63_32) |
482 (tuple1->src_ip_95_64 ^ tuple2->src_ip_95_64) |
483 (tuple1->src_ip_127_96 ^ tuple2->src_ip_127_96) |
484 (tuple1->dest_ip_31_0 ^ tuple2->dest_ip_31_0) |
485 /* DST IP check not required? */
486 (tuple1->dest_ip_63_32 ^ tuple2->dest_ip_63_32) |
487 (tuple1->dest_ip_95_64 ^ tuple2->dest_ip_95_64) |
488 (tuple1->dest_ip_127_96 ^ tuple2->dest_ip_127_96) |
489 (tuple1->l4_protocol ^ tuple2->l4_protocol))
490 return false;
491 else
492 return true;
493 }
494
495 /**
496 * dp_rx_fisa_add_ft_entry() - Add new flow to HW and SW FT if it is not added
497 * @vdev: Handle DP vdev to save in SW flow table
498 * @fisa_hdl: handle to FISA context
499 * @nbuf: nbuf belonging to new flow
500 * @rx_tlv_hdr: Pointer to TLV header
501 * @flow_idx_hash: Hashed flow index
502 * @reo_dest_indication: Reo destination indication for nbuf
503 *
504 * Return: pointer to sw FT entry on success, NULL otherwise
505 */
506 static struct dp_fisa_rx_sw_ft *
507 dp_rx_fisa_add_ft_entry(struct dp_vdev *vdev,
508 struct dp_rx_fst *fisa_hdl,
509 qdf_nbuf_t nbuf,
510 uint8_t *rx_tlv_hdr,
511 uint32_t flow_idx_hash,
512 uint32_t reo_dest_indication)
513 {
514 struct dp_fisa_rx_sw_ft *sw_ft_entry;
515 uint32_t flow_hash;
516 uint32_t hashed_flow_idx;
517 uint32_t skid_count = 0, max_skid_length;
518 struct cdp_rx_flow_tuple_info rx_flow_tuple_info;
519 bool is_fst_updated = false;
520 uint32_t reo_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
521 struct hal_proto_params proto_params;
522
523 if (hal_rx_get_proto_params(fisa_hdl->dp_ctx->hal_soc, rx_tlv_hdr,
524 &proto_params))
525 return NULL;
526
527 if (proto_params.ipv6_proto ||
528 !(proto_params.tcp_proto || proto_params.udp_proto)) {
529 dp_fisa_debug("Not UDP or TCP IPV4 flow");
530 return NULL;
531 }
532
533 rx_flow_tuple_info.tuple_populated = false;
534 flow_hash = flow_idx_hash;
535 hashed_flow_idx = flow_hash & fisa_hdl->hash_mask;
536 max_skid_length = fisa_hdl->max_skid_length;
537
538 dp_fisa_debug("flow_hash 0x%x hashed_flow_idx 0x%x", flow_hash,
539 hashed_flow_idx);
540 dp_fisa_debug("max_skid_length 0x%x", max_skid_length);
541
542 qdf_spin_lock_bh(&fisa_hdl->dp_rx_fst_lock);
543
544 if (!rx_flow_tuple_info.tuple_populated) {
545 wlan_dp_get_flow_tuple_from_nbuf(fisa_hdl->dp_ctx,
546 &rx_flow_tuple_info,
547 nbuf, rx_tlv_hdr);
548 if (rx_flow_tuple_info.bypass_fisa) {
549 qdf_spin_unlock_bh(&fisa_hdl->dp_rx_fst_lock);
550 return NULL;
551 }
552 }
553
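/* Linear-probe the SW FT from the hashed index; on a hash collision, skid to the next entry up to max_skid_length times */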
554 do {
555 sw_ft_entry = &(((struct dp_fisa_rx_sw_ft *)
556 fisa_hdl->base)[hashed_flow_idx]);
557 if (!sw_ft_entry->is_populated) {
558 /* Add SW FT entry */
559 dp_rx_fisa_update_sw_ft_entry(sw_ft_entry,
560 flow_hash, vdev,
561 fisa_hdl->dp_ctx,
562 hashed_flow_idx);
563
564 /* Add HW FT entry */
565 sw_ft_entry->hw_fse =
566 dp_rx_fisa_setup_hw_fse(fisa_hdl,
567 hashed_flow_idx,
568 &rx_flow_tuple_info,
569 reo_dest_indication);
570 sw_ft_entry->is_populated = true;
571 sw_ft_entry->napi_id = reo_id;
572 sw_ft_entry->reo_dest_indication = reo_dest_indication;
573 sw_ft_entry->flow_id_toeplitz =
574 QDF_NBUF_CB_RX_FLOW_ID(nbuf);
575 sw_ft_entry->flow_init_ts = qdf_get_log_timestamp();
576
577 qdf_mem_copy(&sw_ft_entry->rx_flow_tuple_info,
578 &rx_flow_tuple_info,
579 sizeof(struct cdp_rx_flow_tuple_info));
580
581 sw_ft_entry->is_flow_tcp = proto_params.tcp_proto;
582 sw_ft_entry->is_flow_udp = proto_params.udp_proto;
583 sw_ft_entry->add_timestamp = qdf_get_log_timestamp();
584
585 is_fst_updated = true;
586 fisa_hdl->add_flow_count++;
587 break;
588 }
589 /* else */
590
591 if (is_same_flow(&sw_ft_entry->rx_flow_tuple_info,
592 &rx_flow_tuple_info)) {
593 sw_ft_entry->vdev = vdev;
594 sw_ft_entry->vdev_id = vdev->vdev_id;
595 sw_ft_entry->dp_intf =
596 dp_fisa_rx_get_dp_intf_for_vdev(vdev);
597 dp_fisa_debug("It is same flow fse entry idx %d",
598 hashed_flow_idx);
599 /* Incoming flow tuple matches an existing
600 * entry. These are subsequent skbs of the same
601 * flow; the entry made earlier is not yet
602 * reflected in the FSE cache.
603 */
604 break;
605 }
606 /* else */
607 /* hash collision move to the next FT entry */
608 dp_fisa_debug("Hash collision %d",
609 fisa_hdl->hash_collision_cnt);
610 fisa_hdl->hash_collision_cnt++;
611 #ifdef NOT_YET /* assist Flow eviction algorithm */
612 /* uint32_t lru_ft_entry_time = 0xffffffff, lru_ft_entry_idx = 0; */
613 if (fisa_hdl->hw_ft_entry->timestamp < lru_ft_entry_time) {
614 lru_ft_entry_time = fisa_hdl->hw_ft_entry->timestamp;
615 lru_ft_entry_idx = hashed_flow_idx;
616 }
617 #endif
618 skid_count++;
619 hashed_flow_idx++;
620 hashed_flow_idx &= fisa_hdl->hash_mask;
621 } while (skid_count <= max_skid_length);
622
623 /*
624 * fisa_hdl->flow_eviction_cnt++;
625 * if (skid_count > max_skid_length)
626 * Remove LRU flow from HW FT
627 * Remove LRU flow from SW FT
628 */
629 qdf_spin_unlock_bh(&fisa_hdl->dp_rx_fst_lock);
630
631 if (skid_count > max_skid_length) {
632 dp_fisa_debug("Max skid length reached flow cannot be added, evict exiting flow");
633 return NULL;
634 }
635
636 /**
637 * Send HTT cache invalidation command to firmware to
638 * reflect the flow update
639 */
640 if (is_fst_updated &&
641 fisa_hdl->fse_cache_flush_allow &&
642 (qdf_atomic_inc_return(&fisa_hdl->fse_cache_flush_posted) == 1)) {
643 /* An increment returning 1 implies this is the first pending FSE
644 * cache flush request, so start/restart the timer
645 */
646 qdf_timer_start(&fisa_hdl->fse_cache_flush_timer,
647 FSE_CACHE_FLUSH_TIME_OUT);
648 }
649 dp_fisa_debug("sw_ft_entry %pK", sw_ft_entry);
650 return sw_ft_entry;
651 }
652
653 /**
654 * is_flow_idx_valid() - Function to decide if flow_idx TLV is valid
655 * @flow_invalid: flow invalid TLV value
656 * @flow_timeout: flow timeout TLV value, set when the FSE flow search timed out
657 *
658 * Return: True if flow_idx value is valid
659 */
660 static bool is_flow_idx_valid(bool flow_invalid, bool flow_timeout)
661 {
662 if (!flow_invalid && !flow_timeout)
663 return true;
664 else
665 return false;
666 }
667
668 #ifdef WLAN_SUPPORT_RX_FISA_HIST
669 /**
670 * dp_rx_fisa_save_pkt_hist() - Save pkt history from rx sw ft entry
671 * @ft_entry: sw ft entry
672 * @pkt_hist: pkt history ptr
673 *
674 * Return: None
675 */
676 static inline void
677 dp_rx_fisa_save_pkt_hist(struct dp_fisa_rx_sw_ft *ft_entry,
678 struct fisa_pkt_hist *pkt_hist)
679 {
680 /* Structure copy by assignment */
681 *pkt_hist = ft_entry->pkt_hist;
682 }
683
684 /**
685 * dp_rx_fisa_restore_pkt_hist() - Restore rx sw ft entry pkt history
686 * @ft_entry: sw ft entry
687 * @pkt_hist: pkt history ptr
688 *
689 * Return: None
690 */
691 static inline void
692 dp_rx_fisa_restore_pkt_hist(struct dp_fisa_rx_sw_ft *ft_entry,
693 struct fisa_pkt_hist *pkt_hist)
694 {
695 /* Structure copy by assignment */
696 ft_entry->pkt_hist = *pkt_hist;
697 }
698 #else
699 static inline void
700 dp_rx_fisa_save_pkt_hist(struct dp_fisa_rx_sw_ft *ft_entry,
701 struct fisa_pkt_hist *pkt_hist)
702 {
703 }
704
705 static inline void
706 dp_rx_fisa_restore_pkt_hist(struct dp_fisa_rx_sw_ft *ft_entry,
707 struct fisa_pkt_hist *pkt_hist)
708 {
709 }
710 #endif
711
712 /**
713 * dp_fisa_rx_delete_flow() - Delete a flow from SW and HW FST, currently
714 * only applicable when FST is in CMEM
715 * @fisa_hdl: handle to FISA context
716 * @elem: details of the flow which is being added
717 * @hashed_flow_idx: hashed flow idx of the deleting flow
718 *
719 * Return: None
720 */
721 static void
722 dp_fisa_rx_delete_flow(struct dp_rx_fst *fisa_hdl,
723 struct dp_fisa_rx_fst_update_elem *elem,
724 uint32_t hashed_flow_idx)
725 {
726 struct dp_fisa_rx_sw_ft *sw_ft_entry;
727 struct fisa_pkt_hist pkt_hist;
728 u8 reo_id;
729
730 sw_ft_entry = &(((struct dp_fisa_rx_sw_ft *)
731 fisa_hdl->base)[hashed_flow_idx]);
732 reo_id = sw_ft_entry->napi_id;
733
734 dp_rx_fisa_acquire_ft_lock(fisa_hdl, reo_id);
735
736 /* Flush the flow before deletion */
737 dp_rx_fisa_flush_flow_wrap(sw_ft_entry);
738
739 dp_rx_fisa_save_pkt_hist(sw_ft_entry, &pkt_hist);
740 /* Clear the sw_ft_entry */
741 qdf_mem_zero(sw_ft_entry, sizeof(*sw_ft_entry));
742 dp_rx_fisa_restore_pkt_hist(sw_ft_entry, &pkt_hist);
743
744 dp_rx_fisa_update_sw_ft_entry(sw_ft_entry, elem->flow_idx, elem->vdev,
745 fisa_hdl->dp_ctx, hashed_flow_idx);
746
747 /* Add HW FT entry */
748 sw_ft_entry->cmem_offset = dp_rx_fisa_setup_cmem_fse(
749 fisa_hdl, hashed_flow_idx,
750 &elem->flow_tuple_info,
751 elem->reo_dest_indication);
752
753 sw_ft_entry->is_populated = true;
754 sw_ft_entry->napi_id = elem->reo_id;
755 sw_ft_entry->reo_dest_indication = elem->reo_dest_indication;
756 qdf_mem_copy(&sw_ft_entry->rx_flow_tuple_info, &elem->flow_tuple_info,
757 sizeof(struct cdp_rx_flow_tuple_info));
758
759 sw_ft_entry->is_flow_tcp = elem->is_tcp_flow;
760 sw_ft_entry->is_flow_udp = elem->is_udp_flow;
761 sw_ft_entry->add_timestamp = qdf_get_log_timestamp();
762
763 fisa_hdl->add_flow_count++;
764 fisa_hdl->del_flow_count++;
765
766 dp_rx_fisa_release_ft_lock(fisa_hdl, reo_id);
767 }
768
769 /**
770 * dp_fisa_rx_get_hw_ft_timestamp() - Get timestamp maintained in the HW FSE
771 * @fisa_hdl: handle to FISA context
772 * @hashed_flow_idx: hashed idx of the flow
773 *
774 * Return: Timestamp
775 */
776 static uint32_t
777 dp_fisa_rx_get_hw_ft_timestamp(struct dp_rx_fst *fisa_hdl,
778 uint32_t hashed_flow_idx)
779 {
780 hal_soc_handle_t hal_soc_hdl = fisa_hdl->dp_ctx->hal_soc;
781 struct dp_fisa_rx_sw_ft *sw_ft_entry;
782
783 sw_ft_entry = &(((struct dp_fisa_rx_sw_ft *)
784 fisa_hdl->base)[hashed_flow_idx]);
785
786 if (fisa_hdl->fst_in_cmem)
787 return hal_rx_flow_get_cmem_fse_timestamp(
788 hal_soc_hdl, sw_ft_entry->cmem_offset);
789
790 return ((struct rx_flow_search_entry *)sw_ft_entry->hw_fse)->timestamp;
791 }
792
793 /**
794 * dp_fisa_rx_fst_update() - Core logic which handles the addition/deletion
795 * of flows into/from the SW & HW FST
797 * @fisa_hdl: handle to FISA context
798 * @elem: details of the flow which is being added
799 *
800 * Return: None
801 */
802 static void dp_fisa_rx_fst_update(struct dp_rx_fst *fisa_hdl,
803 struct dp_fisa_rx_fst_update_elem *elem)
804 {
805 struct cdp_rx_flow_tuple_info *rx_flow_tuple_info;
806 uint32_t skid_count = 0, max_skid_length;
807 struct dp_fisa_rx_sw_ft *sw_ft_entry;
808 struct wlan_dp_psoc_context *dp_ctx = dp_get_context();
809 struct wlan_dp_psoc_cfg *dp_cfg = &dp_ctx->dp_cfg;
810 bool is_fst_updated = false;
811 uint32_t hashed_flow_idx;
812 uint32_t flow_hash;
813 uint32_t lru_ft_entry_time = 0xffffffff;
814 uint32_t lru_ft_entry_idx = 0;
815 uint32_t timestamp;
816 uint32_t reo_dest_indication;
817 uint64_t sw_timestamp;
818
819 /* Get the hash from the TLV.
820 * The FSE FT Toeplitz hash is the same as the common parser hash
821 * available in the TLV, since both are computed with the same
822 * Toeplitz key.
823 */
824 flow_hash = elem->flow_idx;
825 hashed_flow_idx = flow_hash & fisa_hdl->hash_mask;
826 max_skid_length = fisa_hdl->max_skid_length;
827 rx_flow_tuple_info = &elem->flow_tuple_info;
828 reo_dest_indication = elem->reo_dest_indication;
829
830 dp_fisa_debug("flow_hash 0x%x hashed_flow_idx 0x%x", flow_hash,
831 hashed_flow_idx);
832 dp_fisa_debug("max_skid_length 0x%x", max_skid_length);
833
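/* Probe for a free slot; while skidding, track the entry with the oldest HW timestamp as the LRU eviction candidate */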
834 do {
835 sw_ft_entry = &(((struct dp_fisa_rx_sw_ft *)
836 fisa_hdl->base)[hashed_flow_idx]);
837 if (!sw_ft_entry->is_populated) {
838 /* Add SW FT entry */
839 dp_rx_fisa_update_sw_ft_entry(sw_ft_entry,
840 flow_hash, elem->vdev,
841 fisa_hdl->dp_ctx,
842 hashed_flow_idx);
843
844 /* Add HW FT entry */
845 sw_ft_entry->cmem_offset =
846 dp_rx_fisa_setup_cmem_fse(fisa_hdl,
847 hashed_flow_idx,
848 rx_flow_tuple_info,
849 reo_dest_indication);
850 sw_ft_entry->is_populated = true;
851 sw_ft_entry->napi_id = elem->reo_id;
852 sw_ft_entry->reo_dest_indication = reo_dest_indication;
853 qdf_mem_copy(&sw_ft_entry->rx_flow_tuple_info,
854 rx_flow_tuple_info,
855 sizeof(struct cdp_rx_flow_tuple_info));
856
857 sw_ft_entry->flow_init_ts = qdf_get_log_timestamp();
858 sw_ft_entry->is_flow_tcp = elem->is_tcp_flow;
859 sw_ft_entry->is_flow_udp = elem->is_udp_flow;
860
861 sw_ft_entry->add_timestamp = qdf_get_log_timestamp();
862
863 is_fst_updated = true;
864 fisa_hdl->add_flow_count++;
865 break;
866 }
867 /* else */
868 /* hash collision move to the next FT entry */
869 dp_fisa_debug("Hash collision %d",
870 fisa_hdl->hash_collision_cnt);
871 fisa_hdl->hash_collision_cnt++;
872
873 timestamp = dp_fisa_rx_get_hw_ft_timestamp(fisa_hdl,
874 hashed_flow_idx);
875 if (timestamp < lru_ft_entry_time) {
876 lru_ft_entry_time = timestamp;
877 lru_ft_entry_idx = hashed_flow_idx;
878 }
879 skid_count++;
880 hashed_flow_idx++;
881 hashed_flow_idx &= fisa_hdl->hash_mask;
882 } while (skid_count <= max_skid_length);
883
884 /*
885 * if (skid_count > max_skid_length)
886 * Remove LRU flow from HW FT
887 * Remove LRU flow from SW FT
888 */
889 if ((skid_count > max_skid_length) &&
890 wlan_dp_cfg_is_rx_fisa_lru_del_enabled(dp_cfg)) {
891 dp_fisa_debug("Max skid length reached flow cannot be added, evict exiting flow");
892
893 sw_ft_entry = &(((struct dp_fisa_rx_sw_ft *)
894 fisa_hdl->base)[lru_ft_entry_idx]);
895 sw_timestamp = qdf_get_log_timestamp();
896
897 if (qdf_log_timestamp_to_usecs(sw_timestamp - sw_ft_entry->add_timestamp) >
898 FISA_FT_ENTRY_AGING_US) {
899 qdf_spin_unlock_bh(&fisa_hdl->dp_rx_fst_lock);
900 dp_fisa_rx_delete_flow(fisa_hdl, elem, lru_ft_entry_idx);
901 qdf_spin_lock_bh(&fisa_hdl->dp_rx_fst_lock);
902 is_fst_updated = true;
903 } else
904 dp_fisa_debug("skip update due to aging not complete");
905 }
906
907 /**
908 * Send HTT cache invalidation command to firmware to
909 * reflect the flow update
910 */
911 if (is_fst_updated &&
912 fisa_hdl->fse_cache_flush_allow &&
913 (qdf_atomic_inc_return(&fisa_hdl->fse_cache_flush_posted) == 1)) {
914 /* An increment returning 1 implies this is the first pending FSE
915 * cache flush request, so start/restart the timer
916 */
917 qdf_timer_start(&fisa_hdl->fse_cache_flush_timer,
918 FSE_CACHE_FLUSH_TIME_OUT);
919 }
920 }
921
922 /**
923 * dp_fisa_rx_fst_update_work() - Work functions for FST updates
924 * @arg: argument passed to the work function
925 *
926 * Return: None
927 */
928 void dp_fisa_rx_fst_update_work(void *arg)
929 {
930 struct dp_fisa_rx_fst_update_elem *elem;
931 struct dp_rx_fst *fisa_hdl = arg;
932 qdf_list_node_t *node;
933 hal_soc_handle_t hal_soc_hdl = fisa_hdl->dp_ctx->hal_soc;
934 struct dp_vdev *vdev;
935
936 if (qdf_atomic_read(&fisa_hdl->pm_suspended)) {
937 dp_err_rl("WQ triggered during suspend stage, deferred update");
938 DP_STATS_INC(fisa_hdl, update_deferred, 1);
939 return;
940 }
941
942 if (hif_force_wake_request(((struct hal_soc *)hal_soc_hdl)->hif_handle)) {
943 dp_err("Wake up request failed");
944 qdf_check_state_before_panic(__func__, __LINE__);
945 return;
946 }
947
948 qdf_spin_lock_bh(&fisa_hdl->dp_rx_fst_lock);
949 while (qdf_list_peek_front(&fisa_hdl->fst_update_list, &node) ==
950 QDF_STATUS_SUCCESS) {
951 elem = (struct dp_fisa_rx_fst_update_elem *)node;
952 vdev = dp_vdev_get_ref_by_id(fisa_hdl->soc_hdl,
953 elem->vdev_id,
954 DP_MOD_ID_RX);
955 /*
956 * Update fst only if current dp_vdev fetched by vdev_id is
957 * still valid and matches the original dp_vdev recorded when the
958 * fst node was queued.
959 */
960 if (vdev) {
961 if (vdev == elem->vdev)
962 dp_fisa_rx_fst_update(fisa_hdl, elem);
963
964 dp_vdev_unref_delete(fisa_hdl->soc_hdl, vdev,
965 DP_MOD_ID_RX);
966 }
967 qdf_list_remove_front(&fisa_hdl->fst_update_list, &node);
968 qdf_mem_free(elem);
969 }
970 qdf_spin_unlock_bh(&fisa_hdl->dp_rx_fst_lock);
971
972 if (hif_force_wake_release(((struct hal_soc *)hal_soc_hdl)->hif_handle)) {
973 dp_err("Wake up release failed");
974 qdf_check_state_before_panic(__func__, __LINE__);
975 return;
976 }
977 }
978
979 /**
980 * dp_fisa_rx_is_fst_work_queued() - Check if work is already queued for
981 * the flow
982 * @fisa_hdl: handle to FISA context
983 * @flow_idx: Flow index
984 *
985 * Return: True/False
986 */
987 static inline bool
988 dp_fisa_rx_is_fst_work_queued(struct dp_rx_fst *fisa_hdl, uint32_t flow_idx)
989 {
990 struct dp_fisa_rx_fst_update_elem *elem;
991 qdf_list_node_t *cur_node, *next_node;
992 QDF_STATUS status;
993
994 status = qdf_list_peek_front(&fisa_hdl->fst_update_list, &cur_node);
995 if (status == QDF_STATUS_E_EMPTY)
996 return false;
997
998 do {
999 elem = (struct dp_fisa_rx_fst_update_elem *)cur_node;
1000 if (elem->flow_idx == flow_idx)
1001 return true;
1002
1003 status = qdf_list_peek_next(&fisa_hdl->fst_update_list,
1004 cur_node, &next_node);
1005 cur_node = next_node;
1006 } while (status == QDF_STATUS_SUCCESS);
1007
1008 return false;
1009 }
1010
1011 /**
1012 * dp_fisa_rx_queue_fst_update_work() - Queue FST update work
1013 * @fisa_hdl: Handle to FISA context
1014 * @flow_idx: Flow index
1015 * @nbuf: Received RX packet
1016 * @vdev: DP vdev handle
1017 *
1018 * Return: None
1019 */
1020 static void *
1021 dp_fisa_rx_queue_fst_update_work(struct dp_rx_fst *fisa_hdl, uint32_t flow_idx,
1022 qdf_nbuf_t nbuf, struct dp_vdev *vdev)
1023 {
1024 hal_soc_handle_t hal_soc_hdl = fisa_hdl->dp_ctx->hal_soc;
1025 struct cdp_rx_flow_tuple_info flow_tuple_info;
1026 uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);
1027 struct dp_fisa_rx_fst_update_elem *elem;
1028 struct dp_fisa_rx_sw_ft *sw_ft_entry;
1029 uint32_t hashed_flow_idx;
1030 uint32_t reo_dest_indication;
1031 bool found;
1032 struct hal_proto_params proto_params;
1033
1034 if (hal_rx_get_proto_params(fisa_hdl->dp_ctx->hal_soc, rx_tlv_hdr,
1035 &proto_params))
1036 return NULL;
1037
1038 if (proto_params.ipv6_proto ||
1039 !(proto_params.tcp_proto || proto_params.udp_proto)) {
1040 dp_fisa_debug("Not UDP or TCP IPV4 flow");
1041 return NULL;
1042 }
1043
1044 hal_rx_msdu_get_reo_destination_indication(hal_soc_hdl, rx_tlv_hdr,
1045 &reo_dest_indication);
1046 qdf_spin_lock_bh(&fisa_hdl->dp_rx_fst_lock);
1047 found = dp_fisa_rx_is_fst_work_queued(fisa_hdl, flow_idx);
1048 qdf_spin_unlock_bh(&fisa_hdl->dp_rx_fst_lock);
1049 if (found)
1050 return NULL;
1051
1052 hashed_flow_idx = flow_idx & fisa_hdl->hash_mask;
1053 sw_ft_entry = &(((struct dp_fisa_rx_sw_ft *)
1054 fisa_hdl->base)[hashed_flow_idx]);
1055
1056 wlan_dp_get_flow_tuple_from_nbuf(fisa_hdl->dp_ctx, &flow_tuple_info,
1057 nbuf, rx_tlv_hdr);
1058 if (flow_tuple_info.bypass_fisa)
1059 return NULL;
1060
1061 if (sw_ft_entry->is_populated && is_same_flow(
1062 &sw_ft_entry->rx_flow_tuple_info, &flow_tuple_info))
1063 return sw_ft_entry;
1064
1065 elem = qdf_mem_malloc(sizeof(*elem));
1066 if (!elem) {
1067 dp_fisa_debug("failed to allocate memory for FST update");
1068 return NULL;
1069 }
1070
1071 qdf_mem_copy(&elem->flow_tuple_info, &flow_tuple_info,
1072 sizeof(struct cdp_rx_flow_tuple_info));
1073 elem->flow_idx = flow_idx;
1074 elem->is_tcp_flow = proto_params.tcp_proto;
1075 elem->is_udp_flow = proto_params.udp_proto;
1076 elem->reo_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
1077 elem->reo_dest_indication = reo_dest_indication;
1078 elem->vdev = vdev;
1079 elem->vdev_id = vdev->vdev_id;
1080
1081 qdf_spin_lock_bh(&fisa_hdl->dp_rx_fst_lock);
1082 qdf_list_insert_back(&fisa_hdl->fst_update_list, &elem->node);
1083 qdf_spin_unlock_bh(&fisa_hdl->dp_rx_fst_lock);
1084
1085 if (qdf_atomic_read(&fisa_hdl->pm_suspended)) {
1086 fisa_hdl->fst_wq_defer = true;
1087 dp_info("defer fst update task in WoW");
1088 } else {
1089 qdf_queue_work(fisa_hdl->dp_ctx->qdf_dev,
1090 fisa_hdl->fst_update_wq,
1091 &fisa_hdl->fst_update_work);
1092 }
1093
1094 return NULL;
1095 }
1096
1097 /**
1098 * dp_fisa_rx_get_sw_ft_entry() - Get SW FT entry for the flow
1099 * @fisa_hdl: Handle to FISA context
1100 * @nbuf: Received RX packet
1101 * @flow_idx: Flow index
1102 * @vdev: handle to DP vdev
1103 *
1104 * Return: SW FT entry
1105 */
1106 static inline struct dp_fisa_rx_sw_ft *
1107 dp_fisa_rx_get_sw_ft_entry(struct dp_rx_fst *fisa_hdl, qdf_nbuf_t nbuf,
1108 uint32_t flow_idx, struct dp_vdev *vdev)
1109 {
1110 hal_soc_handle_t hal_soc_hdl = fisa_hdl->dp_ctx->hal_soc;
1111 struct dp_fisa_rx_sw_ft *sw_ft_entry = NULL;
1112 struct dp_fisa_rx_sw_ft *sw_ft_base;
1113 uint32_t fse_metadata;
1114 uint8_t *rx_tlv_hdr;
1115
1116 sw_ft_base = (struct dp_fisa_rx_sw_ft *)fisa_hdl->base;
1117 rx_tlv_hdr = qdf_nbuf_data(nbuf);
1118
1119 if (qdf_unlikely(flow_idx >= fisa_hdl->max_entries)) {
1120 dp_info("flow_idx is invalid 0x%x", flow_idx);
1121 hal_rx_dump_pkt_tlvs(hal_soc_hdl, rx_tlv_hdr,
1122 QDF_TRACE_LEVEL_INFO_HIGH);
1123 DP_STATS_INC(fisa_hdl, invalid_flow_index, 1);
1124 return NULL;
1125 }
1126
1127 sw_ft_entry = &sw_ft_base[flow_idx];
1128 if (!sw_ft_entry->is_populated) {
1129 dp_info("Pkt rx for non configured flow idx 0x%x", flow_idx);
1130 DP_STATS_INC(fisa_hdl, invalid_flow_index, 1);
1131 return NULL;
1132 }
1133
1134 if (!fisa_hdl->flow_deletion_supported) {
1135 sw_ft_entry->vdev = vdev;
1136 sw_ft_entry->vdev_id = vdev->vdev_id;
1137 sw_ft_entry->dp_intf = dp_fisa_rx_get_dp_intf_for_vdev(vdev);
1138 return sw_ft_entry;
1139 }
1140
1141 /* When a flow is deleted, there could be some packets of that flow
1142 * with valid flow_idx in the REO queue and arrive at a later time,
1143 * compare the metadata for such packets before returning the SW FT
1144 * entry to avoid packets getting aggregated with the wrong flow.
1145 */
1146 fse_metadata = hal_rx_msdu_fse_metadata_get(hal_soc_hdl, rx_tlv_hdr);
1147 if (fisa_hdl->del_flow_count && fse_metadata != sw_ft_entry->metadata)
1148 return NULL;
1149
1150 sw_ft_entry->vdev = vdev;
1151 sw_ft_entry->vdev_id = vdev->vdev_id;
1152 sw_ft_entry->dp_intf = dp_fisa_rx_get_dp_intf_for_vdev(vdev);
1153 return sw_ft_entry;
1154 }
1155
1156 #ifdef DP_OFFLOAD_FRAME_WITH_SW_EXCEPTION
1157 /**
1158 * dp_rx_reo_dest_honor_check() - Check if the packet REO destination was
1159 * changed by FW offload and whether it is valid
1160 * @fisa_hdl: handle to FISA context
1161 * @nbuf: RX packet nbuf
1162 * @tlv_reo_dest_ind: reo_dest_ind fetched from rx_packet_tlv
1163 *
1164 * Return: QDF_STATUS_SUCCESS if the REO destination is unchanged and valid, else error
1165 */
1166 static inline QDF_STATUS
1167 dp_rx_reo_dest_honor_check(struct dp_rx_fst *fisa_hdl, qdf_nbuf_t nbuf,
1168 uint32_t tlv_reo_dest_ind)
1169 {
1170 uint8_t sw_exception =
1171 qdf_nbuf_get_rx_reo_dest_ind_or_sw_excpt(nbuf);
1172
1173 if (fisa_hdl->rx_hash_enabled &&
1174 (tlv_reo_dest_ind < HAL_REO_DEST_IND_START_OFFSET))
1175 return QDF_STATUS_E_FAILURE;
1176 /*
1177 * If sw_exception bit is marked, then this data packet is
1178 * re-injected by FW offload, reo destination will not honor
1179 * the original FSE/hash selection, skip FISA.
1180 */
1181 return sw_exception ? QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
1182 }
1183 #else
1184 static inline QDF_STATUS
1185 dp_rx_reo_dest_honor_check(struct dp_rx_fst *fisa_hdl, qdf_nbuf_t nbuf,
1186 uint32_t tlv_reo_dest_ind)
1187 {
1188 uint8_t ring_reo_dest_ind =
1189 qdf_nbuf_get_rx_reo_dest_ind_or_sw_excpt(nbuf);
1190 /*
1191 * Compare reo_destination_indication between reo ring descriptor
1192 * and rx_pkt_tlvs; if they differ, these frames were likely
1193 * re-injected by FW or already touched by another module, so
1194 * skip FISA to avoid a REO2SW ring mismatch for the same flow.
1195 */
1196 if (tlv_reo_dest_ind != ring_reo_dest_ind ||
1197 REO_DEST_IND_IPA_REROUTE == ring_reo_dest_ind ||
1198 (fisa_hdl->rx_hash_enabled &&
1199 (tlv_reo_dest_ind < HAL_REO_DEST_IND_START_OFFSET)))
1200 return QDF_STATUS_E_FAILURE;
1201
1202 return QDF_STATUS_SUCCESS;
1203 }
1204 #endif
1205
1206 /**
1207 * dp_rx_get_fisa_flow() - Get FT entry corresponding to incoming nbuf
1208 * @fisa_hdl: handle to FISA context
1209 * @vdev: handle to DP vdev
1210 * @nbuf: incoming msdu
1211 *
1212 * Return: handle SW FT entry for nbuf flow
1213 */
1214 static struct dp_fisa_rx_sw_ft *
1215 dp_rx_get_fisa_flow(struct dp_rx_fst *fisa_hdl, struct dp_vdev *vdev,
1216 qdf_nbuf_t nbuf)
1217 {
1218 uint8_t *rx_tlv_hdr;
1219 uint32_t flow_idx_hash;
1220 uint32_t tlv_reo_dest_ind;
1221 bool flow_invalid, flow_timeout, flow_idx_valid;
1222 struct dp_fisa_rx_sw_ft *sw_ft_entry = NULL;
1223 hal_soc_handle_t hal_soc_hdl = fisa_hdl->dp_ctx->hal_soc;
1224 QDF_STATUS status;
1225
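/* TCP is left to the regular GRO path; only non-TCP (i.e. UDP) flows are considered for FISA aggregation here */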
1226 if (QDF_NBUF_CB_RX_TCP_PROTO(nbuf))
1227 return sw_ft_entry;
1228
1229 rx_tlv_hdr = qdf_nbuf_data(nbuf);
1230 hal_rx_msdu_get_reo_destination_indication(hal_soc_hdl, rx_tlv_hdr,
1231 &tlv_reo_dest_ind);
1232 status = dp_rx_reo_dest_honor_check(fisa_hdl, nbuf, tlv_reo_dest_ind);
1233 if (QDF_IS_STATUS_ERROR(status))
1234 return sw_ft_entry;
1235
1236 hal_rx_msdu_get_flow_params(hal_soc_hdl, rx_tlv_hdr, &flow_invalid,
1237 &flow_timeout, &flow_idx_hash);
1238
1239 flow_idx_valid = is_flow_idx_valid(flow_invalid, flow_timeout);
1240 if (flow_idx_valid) {
1241 sw_ft_entry = dp_fisa_rx_get_sw_ft_entry(fisa_hdl, nbuf,
1242 flow_idx_hash, vdev);
1243 goto print_and_return;
1244 }
1245
1246 /* else new flow, add entry to FT */
1247
1248 if (fisa_hdl->fst_in_cmem)
1249 return dp_fisa_rx_queue_fst_update_work(fisa_hdl, flow_idx_hash,
1250 nbuf, vdev);
1251
1252 sw_ft_entry = dp_rx_fisa_add_ft_entry(vdev, fisa_hdl,
1253 nbuf,
1254 rx_tlv_hdr,
1255 flow_idx_hash,
1256 tlv_reo_dest_ind);
1257
1258 print_and_return:
1259 dp_fisa_debug("nbuf %pK fl_idx 0x%x fl_inv %d fl_timeout %d flow_id_toeplitz %x reo_dest_ind 0x%x",
1260 nbuf, flow_idx_hash, flow_invalid, flow_timeout,
1261 sw_ft_entry ? sw_ft_entry->flow_id_toeplitz : 0,
1262 tlv_reo_dest_ind);
1263
1264 return sw_ft_entry;
1265 }
1266
1267 #ifdef NOT_YET
1268 /**
1269 * dp_rx_fisa_aggr_tcp() - Aggregate an incoming nbuf into the TCP flow
1270 * @fisa_flow: Handle to SW flow entry, which holds the aggregated nbuf
1271 * @nbuf: Incoming nbuf
1272 *
1273 * Return: FISA_AGGR_DONE on successful aggregation
1274 */
1275 static enum fisa_aggr_ret
1276 dp_rx_fisa_aggr_tcp(struct dp_fisa_rx_sw_ft *fisa_flow, qdf_nbuf_t nbuf)
1277 {
1278 qdf_nbuf_t head_skb = fisa_flow->head_skb;
1279 qdf_net_iphdr_t *iph;
1280 uint32_t tcp_data_len;
1281
1282 fisa_flow->bytes_aggregated += qdf_nbuf_len(nbuf);
1283 if (!head_skb) {
1284 /* First nbuf for the flow */
1285 dp_fisa_debug("first head skb");
1286 fisa_flow->head_skb = nbuf;
1287 return FISA_AGGR_DONE;
1288 }
1289
1290 tcp_data_len = (qdf_ntohs(iph->ip_len) - sizeof(qdf_net_iphdr_t) -
1291 sizeof(qdf_net_tcphdr_t));
1292 qdf_nbuf_pull_head(nbuf, (qdf_nbuf_len(nbuf) - tcp_data_len));
1293
1294 if (qdf_nbuf_get_ext_list(head_skb)) {
1295 /* this is 3rd skb after head skb, 2nd skb */
1296 fisa_flow->last_skb->next = nbuf;
1297 } else {
1298 /* 1st skb after head skb */
1299 qdf_nbuf_append_ext_list(head_skb, nbuf,
1300 fisa_flow->cumulative_ip_length);
1301 qdf_nbuf_set_is_frag(head_skb, 1);
1302 }
1303
1304 fisa_flow->last_skb = nbuf;
1305 fisa_flow->aggr_count++;
1306
1307 /* move it to while flushing the flow, that is update before flushing */
1308 return FISA_AGGR_DONE;
1309 }
1310 #else
1311 static enum fisa_aggr_ret
1312 dp_rx_fisa_aggr_tcp(struct dp_rx_fst *fisa_hdl,
1313 struct dp_fisa_rx_sw_ft *fisa_flow, qdf_nbuf_t nbuf)
1314 {
1315 return FISA_AGGR_DONE;
1316 }
1317 #endif
1318
1319 /**
1320 * get_transport_payload_offset() - Get offset to payload
1321 * @fisa_hdl: Handle to FISA context
1322 * @l3_hdr_offset: layer 3 header offset
1323 * @l4_hdr_offset: layer 4 header offset
1324 *
1325 * Return: Offset value to transport payload
1326 */
1327 static inline int get_transport_payload_offset(struct dp_rx_fst *fisa_hdl,
1328 uint32_t l3_hdr_offset,
1329 uint32_t l4_hdr_offset)
1330 {
1331 /* ETHERNET_HDR_LEN + ip_hdr_len + UDP/TCP; */
1332 return (l3_hdr_offset + l4_hdr_offset + sizeof(qdf_net_udphdr_t));
1333 }
1334
1335 /**
1336 * get_transport_header_offset() - Get transport header offset
1337 * @fisa_flow: Handle to FISA sw flow entry
1338 * @l3_hdr_offset: layer 3 header offset
1339 * @l4_hdr_offset: layer 4 header offset
1340 *
1341 * Return: Offset value to transport header
1342 */
1343 static inline
1344 int get_transport_header_offset(struct dp_fisa_rx_sw_ft *fisa_flow,
1345 uint32_t l3_hdr_offset,
1346 uint32_t l4_hdr_offset)
1347
1348 {
1349 /* ETHERNET_HDR_LEN + ip_hdr_len */
1350 return (l3_hdr_offset + l4_hdr_offset);
1351 }
1352
1353 /**
1354 * dp_rx_fisa_aggr_udp() - Aggregate an incoming nbuf into the UDP flow
1355 * @fisa_hdl: Handle to fisa context
1356 * @fisa_flow: Handle to SW flow entry, which holds the aggregated nbuf
1357 * @nbuf: Incoming nbuf
1358 *
1359 * Return: FISA_AGGR_DONE on successful aggregation
1360 */
1361 static enum fisa_aggr_ret
1362 dp_rx_fisa_aggr_udp(struct dp_rx_fst *fisa_hdl,
1363 struct dp_fisa_rx_sw_ft *fisa_flow, qdf_nbuf_t nbuf)
1364 {
1365 qdf_nbuf_t head_skb = fisa_flow->head_skb;
1366 uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);
1367 uint32_t l2_hdr_offset =
1368 hal_rx_msdu_end_l3_hdr_padding_get(fisa_hdl->dp_ctx->hal_soc,
1369 rx_tlv_hdr);
1370 qdf_net_udphdr_t *udp_hdr;
1371 uint32_t udp_len;
1372 uint32_t transport_payload_offset;
1373 uint32_t l3_hdr_offset, l4_hdr_offset;
1374
1375 qdf_nbuf_pull_head(nbuf, fisa_hdl->rx_pkt_tlv_size + l2_hdr_offset);
1376
1377 hal_rx_get_l3_l4_offsets(fisa_hdl->dp_ctx->hal_soc, rx_tlv_hdr,
1378 &l3_hdr_offset, &l4_hdr_offset);
1379 udp_hdr = (qdf_net_udphdr_t *)(qdf_nbuf_data(nbuf) +
1380 get_transport_header_offset(fisa_flow, l3_hdr_offset,
1381 l4_hdr_offset));
1382
1383 udp_len = qdf_ntohs(udp_hdr->udp_len);
1384
1385 /**
1386 * If the incoming nbuf is larger than the ongoing aggregation,
1387 * then flush the aggregate and start a new aggregation for this nbuf
1388 */
1389 if (head_skb &&
1390 (udp_len > qdf_ntohs(fisa_flow->head_skb_udp_hdr->udp_len))) {
1391 /* current msdu should not be taken into account for flushing */
1392 fisa_flow->adjusted_cumulative_ip_length -=
1393 (udp_len - sizeof(qdf_net_udphdr_t));
1394 fisa_flow->cur_aggr--;
1395 dp_rx_fisa_flush_flow_wrap(fisa_flow);
1396 /* napi_flush_cumulative_ip_length does not include current msdu */
1397 fisa_flow->napi_flush_cumulative_ip_length -= udp_len;
1398 head_skb = NULL;
1399 }
1400
1401 if (!head_skb) {
1402 dp_fisa_debug("first head skb nbuf %pK", nbuf);
1403 /* First nbuf for the flow */
1404 fisa_flow->head_skb = nbuf;
1405 fisa_flow->head_skb_udp_hdr = udp_hdr;
1406 fisa_flow->cur_aggr_gso_size = udp_len -
1407 sizeof(qdf_net_udphdr_t);
1408 fisa_flow->adjusted_cumulative_ip_length = udp_len;
1409 fisa_flow->head_skb_ip_hdr_offset = l3_hdr_offset;
1410 fisa_flow->head_skb_l4_hdr_offset = l4_hdr_offset;
1411
1412 fisa_flow->frags_cumulative_len = 0;
1413
1414 return FISA_AGGR_DONE;
1415 }
1416
1417 transport_payload_offset =
1418 get_transport_payload_offset(fisa_hdl, l3_hdr_offset,
1419 l4_hdr_offset);
1420
1421 hex_dump_skb_data(nbuf, false);
1422 qdf_nbuf_pull_head(nbuf, transport_payload_offset);
1423 hex_dump_skb_data(nbuf, false);
1424
1425 fisa_flow->bytes_aggregated += qdf_nbuf_len(nbuf);
1426
1427 fisa_flow->frags_cumulative_len += (udp_len -
1428 sizeof(qdf_net_udphdr_t));
1429
1430 if (qdf_nbuf_get_ext_list(head_skb)) {
1431 /*
1432 * This is 3rd skb for flow.
1433 * After head skb, 2nd skb in fraglist
1434 */
1435 if (qdf_likely(fisa_flow->last_skb)) {
1436 qdf_nbuf_set_next(fisa_flow->last_skb, nbuf);
1437 } else {
1438 qdf_nbuf_free(nbuf);
1439 return FISA_AGGR_DONE;
1440 }
1441 } else {
1442 /* 1st skb after head skb
1443 * implement qdf wrapper set_ext_list
1444 */
1445 qdf_nbuf_append_ext_list(head_skb, nbuf, 0);
1446 qdf_nbuf_set_is_frag(nbuf, 1);
1447 }
1448
1449 fisa_flow->last_skb = nbuf;
1450 fisa_flow->aggr_count++;
1451
1452 dp_fisa_debug("Stiched head skb fisa_flow %pK", fisa_flow);
1453 hex_dump_skb_data(fisa_flow->head_skb, false);
1454
1455 /**
1456 * If the incoming nbuf is smaller than the ongoing aggregation,
1457 * then flush the aggregate
1458 */
1459 if (udp_len < qdf_ntohs(fisa_flow->head_skb_udp_hdr->udp_len))
1460 dp_rx_fisa_flush_flow_wrap(fisa_flow);
1461
1462 return FISA_AGGR_DONE;
1463 }
1464
1465 /**
1466 * dp_fisa_rx_linear_skb() - Linearize fraglist skb to linear skb
1467 * @vdev: handle to DP vdev
1468 * @head_skb: non linear skb
1469 * @size: Total length of the non-linear stitched skb
1470 *
1471 * Return: Linearized skb pointer
1472 */
1473 static qdf_nbuf_t dp_fisa_rx_linear_skb(struct dp_vdev *vdev,
1474 qdf_nbuf_t head_skb, uint32_t size)
1475 {
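/* Linearization is not implemented; returning NULL makes the caller deliver the fraglist skb as-is */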
1476 return NULL;
1477 }
1478
1479 #ifdef WLAN_FEATURE_11BE
1480 static inline struct dp_vdev *
1481 dp_fisa_rx_get_flow_flush_vdev_ref(ol_txrx_soc_handle cdp_soc,
1482 struct dp_fisa_rx_sw_ft *fisa_flow)
1483 {
1484 struct dp_vdev *fisa_flow_head_skb_vdev;
1485 struct dp_vdev *fisa_flow_vdev;
1486 uint8_t vdev_id;
1487
1488 vdev_id = QDF_NBUF_CB_RX_VDEV_ID(fisa_flow->head_skb);
1489
1490 get_new_vdev_ref:
1491 fisa_flow_head_skb_vdev = dp_vdev_get_ref_by_id(
1492 cdp_soc_t_to_dp_soc(cdp_soc),
1493 vdev_id, DP_MOD_ID_RX);
1494 if (qdf_unlikely(!fisa_flow_head_skb_vdev)) {
1495 qdf_nbuf_free(fisa_flow->head_skb);
1496 goto out;
1497 }
1498
1499 if (qdf_unlikely(fisa_flow_head_skb_vdev != fisa_flow->vdev)) {
1500 if (qdf_unlikely(fisa_flow_head_skb_vdev->vdev_id ==
1501 fisa_flow->vdev_id))
1502 goto fisa_flow_vdev_fail;
1503
1504 fisa_flow_vdev = dp_vdev_get_ref_by_id(
1505 cdp_soc_t_to_dp_soc(cdp_soc),
1506 fisa_flow->vdev_id,
1507 DP_MOD_ID_RX);
1508 if (qdf_unlikely(!fisa_flow_vdev))
1509 goto fisa_flow_vdev_fail;
1510
1511 if (qdf_unlikely(fisa_flow_vdev != fisa_flow->vdev))
1512 goto fisa_flow_vdev_mismatch;
1513
1514 /*
1515 * vdev_id may mismatch in case of MLO link switch.
1516 * Check if the vdevs belong to same MLD,
1517 * if yes, then submit the flow else drop the packets.
1518 */
1519 if (qdf_unlikely(qdf_mem_cmp(
1520 fisa_flow_vdev->mld_mac_addr.raw,
1521 fisa_flow_head_skb_vdev->mld_mac_addr.raw,
1522 QDF_MAC_ADDR_SIZE) != 0)) {
1523 goto fisa_flow_vdev_mismatch;
1524 } else {
1525 fisa_flow->same_mld_vdev_mismatch++;
1526 /* Continue with aggregation */
1527
1528 /* Release ref to old vdev */
1529 dp_vdev_unref_delete(cdp_soc_t_to_dp_soc(cdp_soc),
1530 fisa_flow_head_skb_vdev,
1531 DP_MOD_ID_RX);
1532
1533 /*
1534 * Update vdev_id and let it loop to find this
1535 * vdev by ref.
1536 */
1537 vdev_id = fisa_flow_vdev->vdev_id;
1538 dp_vdev_unref_delete(cdp_soc_t_to_dp_soc(cdp_soc),
1539 fisa_flow_vdev,
1540 DP_MOD_ID_RX);
1541 goto get_new_vdev_ref;
1542 }
1543 } else {
1544 goto out;
1545 }
1546
1547 fisa_flow_vdev_mismatch:
1548 dp_vdev_unref_delete(cdp_soc_t_to_dp_soc(cdp_soc),
1549 fisa_flow_vdev,
1550 DP_MOD_ID_RX);
1551
1552 fisa_flow_vdev_fail:
1553 qdf_nbuf_free(fisa_flow->head_skb);
1554 dp_vdev_unref_delete(cdp_soc_t_to_dp_soc(cdp_soc),
1555 fisa_flow_head_skb_vdev,
1556 DP_MOD_ID_RX);
1557 fisa_flow_head_skb_vdev = NULL;
1558 out:
1559 return fisa_flow_head_skb_vdev;
1560 }
1561 #else
1562 static inline struct dp_vdev *
1563 dp_fisa_rx_get_flow_flush_vdev_ref(ol_txrx_soc_handle cdp_soc,
1564 struct dp_fisa_rx_sw_ft *fisa_flow)
1565 {
1566 struct dp_vdev *fisa_flow_head_skb_vdev;
1567
1568 fisa_flow_head_skb_vdev = dp_vdev_get_ref_by_id(
1569 cdp_soc_t_to_dp_soc(cdp_soc),
1570 QDF_NBUF_CB_RX_VDEV_ID(fisa_flow->head_skb),
1571 DP_MOD_ID_RX);
1572 if (qdf_unlikely(!fisa_flow_head_skb_vdev ||
1573 (fisa_flow_head_skb_vdev != fisa_flow->vdev))) {
1574 qdf_nbuf_free(fisa_flow->head_skb);
1575 goto out;
1576 }
1577
1578 return fisa_flow_head_skb_vdev;
1579
1580 out:
1581 if (fisa_flow_head_skb_vdev)
1582 dp_vdev_unref_delete(cdp_soc_t_to_dp_soc(cdp_soc),
1583 fisa_flow_head_skb_vdev,
1584 DP_MOD_ID_RX);
1585 return NULL;
1586 }
1587 #endif
1588
1589 /**
1590 * dp_rx_fisa_flush_udp_flow() - Flush all aggregated nbuf of the udp flow
1591 * @vdev: handle to dp_vdev
1592 * @fisa_flow: Flow for which aggregates to be flushed
1593 *
1594 * Return: None
1595 */
1596 static void
1597 dp_rx_fisa_flush_udp_flow(struct dp_vdev *vdev,
1598 struct dp_fisa_rx_sw_ft *fisa_flow)
1599 {
1600 qdf_nbuf_t head_skb = fisa_flow->head_skb;
1601 qdf_net_iphdr_t *head_skb_iph;
1602 qdf_net_udphdr_t *head_skb_udp_hdr;
1603 qdf_nbuf_shared_info_t shinfo;
1604 qdf_nbuf_t linear_skb;
1605 struct dp_vdev *fisa_flow_vdev;
1606 ol_txrx_soc_handle cdp_soc = fisa_flow->dp_ctx->cdp_soc;
1607
1608 dp_fisa_debug("head_skb %pK", head_skb);
1609 dp_fisa_debug("cumulative ip length %d",
1610 fisa_flow->adjusted_cumulative_ip_length);
1611 if (!head_skb) {
1612 dp_fisa_debug("Already flushed");
1613 return;
1614 }
1615
1616 qdf_nbuf_set_hash(head_skb, QDF_NBUF_CB_RX_FLOW_ID(head_skb));
1617 head_skb->sw_hash = 1;
1618 if (qdf_nbuf_get_ext_list(head_skb)) {
1619 __sum16 pseudo;
1620
1621 shinfo = qdf_nbuf_get_shinfo(head_skb);
1622 /* Update the head_skb before flush */
1623 dp_fisa_debug("cumu ip length host order 0x%x",
1624 fisa_flow->adjusted_cumulative_ip_length);
1625 head_skb_iph = (qdf_net_iphdr_t *)(qdf_nbuf_data(head_skb) +
1626 fisa_flow->head_skb_ip_hdr_offset);
1627 dp_fisa_debug("iph ptr %pK", head_skb_iph);
1628
1629 head_skb_udp_hdr = fisa_flow->head_skb_udp_hdr;
1630
1631 dp_fisa_debug("udph ptr %pK", head_skb_udp_hdr);
1632
1633 dp_fisa_debug("ip_len 0x%x", qdf_ntohs(head_skb_iph->ip_len));
1634
1635		/* data_len is the total length of the non-head skbs;
1636		 * the cumulative ip length includes the head_skb ip length as well
1637		 */
1638 qdf_nbuf_set_data_len(head_skb,
1639 ((fisa_flow->adjusted_cumulative_ip_length) -
1640 qdf_ntohs(head_skb_udp_hdr->udp_len)));
1641
1642 qdf_nbuf_set_len(head_skb, (qdf_nbuf_len(head_skb) +
1643 qdf_nbuf_get_only_data_len(head_skb)));
1644
1645 head_skb_iph->ip_len =
1646 qdf_htons((fisa_flow->adjusted_cumulative_ip_length)
1647 + /* IP hdr len */
1648 fisa_flow->head_skb_l4_hdr_offset);
1649 pseudo = ~qdf_csum_tcpudp_magic(head_skb_iph->ip_saddr,
1650 head_skb_iph->ip_daddr,
1651 fisa_flow->adjusted_cumulative_ip_length,
1652 head_skb_iph->ip_proto, 0);
1653
1654 head_skb_iph->ip_check = 0;
1655 head_skb_iph->ip_check = qdf_ip_fast_csum(head_skb_iph,
1656 head_skb_iph->ip_hl);
1657
1658 head_skb_udp_hdr->udp_len =
1659 qdf_htons(qdf_ntohs(head_skb_iph->ip_len) -
1660 fisa_flow->head_skb_l4_hdr_offset);
1661 head_skb_udp_hdr->udp_cksum = pseudo;
1662 qdf_nbuf_set_csum_start(head_skb, ((u8 *)head_skb_udp_hdr -
1663 qdf_nbuf_head(head_skb)));
1664 qdf_nbuf_set_csum_offset(head_skb,
1665 offsetof(qdf_net_udphdr_t, udp_cksum));
1666
1667 qdf_nbuf_set_gso_size(head_skb, fisa_flow->cur_aggr_gso_size);
1668 dp_fisa_debug("gso_size %d, udp_len %d\n",
1669 qdf_nbuf_get_gso_size(head_skb),
1670 qdf_ntohs(head_skb_udp_hdr->udp_len));
1671 qdf_nbuf_set_gso_segs(head_skb, fisa_flow->cur_aggr);
1672 qdf_nbuf_set_gso_type_udp_l4(head_skb);
1673 qdf_nbuf_set_ip_summed_partial(head_skb);
1674 }
1675
1676 qdf_nbuf_set_next(fisa_flow->head_skb, NULL);
1677 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(fisa_flow->head_skb) = 1;
1678 if (fisa_flow->last_skb)
1679 qdf_nbuf_set_next(fisa_flow->last_skb, NULL);
1680
1681 hex_dump_skb_data(fisa_flow->head_skb, false);
1682
1683 fisa_flow_vdev = dp_fisa_rx_get_flow_flush_vdev_ref(cdp_soc, fisa_flow);
1684 if (!fisa_flow_vdev)
1685 goto vdev_ref_get_fail;
1686
1687 dp_fisa_debug("fisa_flow->curr_aggr %d", fisa_flow->cur_aggr);
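	/*
	 * Try to linearize the aggregated skb into a single buffer
	 * (dp_rx_fisa_linear_skb(); the 24000 argument is presumably the
	 * target buffer size). If linearization fails, the non-linear
	 * head_skb is delivered as-is after the length sanity check below.
	 */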
1688 linear_skb = dp_fisa_rx_linear_skb(vdev, fisa_flow->head_skb, 24000);
1689 if (linear_skb) {
1690 if (!vdev->osif_rx || QDF_STATUS_SUCCESS !=
1691 vdev->osif_rx(vdev->osif_vdev, linear_skb))
1692 qdf_nbuf_free(linear_skb);
1693 /* Free non linear skb */
1694 qdf_nbuf_free(fisa_flow->head_skb);
1695 } else {
1696 /*
1697		 * Sanity check: head skb data_len should equal the sum of
1698		 * all fragment lengths
1699 */
1700 if (qdf_unlikely(fisa_flow->frags_cumulative_len !=
1701 qdf_nbuf_get_only_data_len(fisa_flow->head_skb))) {
1702 qdf_assert(0);
1703 /* Drop the aggregate */
1704 qdf_nbuf_free(fisa_flow->head_skb);
1705 goto out;
1706 }
1707
1708 if (!vdev->osif_rx || QDF_STATUS_SUCCESS !=
1709 vdev->osif_rx(vdev->osif_vdev, fisa_flow->head_skb))
1710 qdf_nbuf_free(fisa_flow->head_skb);
1711 }
1712
1713 out:
1714 if (fisa_flow_vdev)
1715 dp_vdev_unref_delete(cdp_soc_t_to_dp_soc(cdp_soc),
1716 fisa_flow_vdev,
1717 DP_MOD_ID_RX);
1718
1719 vdev_ref_get_fail:
1720 fisa_flow->head_skb = NULL;
1721 fisa_flow->last_skb = NULL;
1722
1723 fisa_flow->flush_count++;
1724 }
1725
1726 /**
1727 * dp_rx_fisa_flush_tcp_flow() - Flush all aggregated nbufs of the TCP flow
1728 * @vdev: handle to dp_vdev
1729 * @fisa_flow: Flow for which aggregates are to be flushed
1730 *
1731 * Return: None
1732 */
1733 static void
1734 dp_rx_fisa_flush_tcp_flow(struct dp_vdev *vdev,
1735 struct dp_fisa_rx_sw_ft *fisa_flow)
1736 {
1737 qdf_nbuf_t head_skb = fisa_flow->head_skb;
1738 qdf_net_iphdr_t *head_skb_iph;
1739 qdf_nbuf_shared_info_t shinfo;
1740
1741 if (!head_skb) {
1742 dp_fisa_debug("Already flushed");
1743 return;
1744 }
1745
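	/*
	 * The TCP flush path appears to be largely vestigial: TCP frames are
	 * bypassed in dp_is_nbuf_bypass_fisa() and dp_add_nbuf_to_fisa_flow()
	 * asserts before aggregating TCP, so only minimal head_skb fixups are
	 * done here before handing the skb to osif_rx.
	 */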
1746 shinfo = qdf_nbuf_get_shinfo(head_skb);
1747
1748 /* Update the head_skb before flush */
1749 head_skb->hash = fisa_flow->flow_hash;
1750 head_skb->sw_hash = 1;
1751 shinfo->gso_type = SKB_GSO_UDP_L4;
1752
1753 head_skb_iph = (qdf_net_iphdr_t *)(qdf_nbuf_data(head_skb) +
1754 fisa_flow->head_skb_ip_hdr_offset);
1755
1756 head_skb_iph->ip_len = fisa_flow->adjusted_cumulative_ip_length;
1757 head_skb_iph->ip_check = ip_fast_csum((u8 *)head_skb_iph,
1758 head_skb_iph->ip_hl);
1759
1760 qdf_nbuf_set_next(fisa_flow->head_skb, NULL);
1761 if (fisa_flow->last_skb)
1762 qdf_nbuf_set_next(fisa_flow->last_skb, NULL);
1763 vdev->osif_rx(vdev->osif_vdev, fisa_flow->head_skb);
1764
1765 fisa_flow->head_skb = NULL;
1766
1767 fisa_flow->flush_count++;
1768 }
1769
1770 /**
1771 * dp_rx_fisa_flush_flow() - Flush all aggregated nbufs of the flow
1772 * @vdev: handle to dp_vdev
1773 * @flow: Flow for which aggregates are to be flushed
1774 *
1775 * Return: None
1776 */
1777 static void dp_rx_fisa_flush_flow(struct dp_vdev *vdev,
1778 struct dp_fisa_rx_sw_ft *flow)
1779 {
1780 dp_fisa_debug("dp_rx_fisa_flush_flow");
1781
1782 if (flow->is_flow_udp)
1783 dp_rx_fisa_flush_udp_flow(vdev, flow);
1784 else
1785 dp_rx_fisa_flush_tcp_flow(vdev, flow);
1786 }
1787
1788 /**
1789 * dp_fisa_aggregation_should_stop() - check if FISA aggregation should stop
1790 * @fisa_flow: Handle SW flow entry
1791 * @hal_aggr_count: current aggregate count from RX PKT TLV
1792 * @hal_cumulative_ip_len: current cumulative ip length from RX PKT TLV
1793 * @rx_tlv_hdr: current msdu RX PKT TLV
1794 *
1795 * Return: true - current flow aggregation should stop,
1796 * false - continue to aggregate.
1797 */
1798 static bool dp_fisa_aggregation_should_stop(
1799 struct dp_fisa_rx_sw_ft *fisa_flow,
1800 uint32_t hal_aggr_count,
1801 uint16_t hal_cumulative_ip_len,
1802 uint8_t *rx_tlv_hdr)
1803 {
1804 uint32_t msdu_len =
1805 hal_rx_msdu_start_msdu_len_get(fisa_flow->dp_ctx->hal_soc,
1806 rx_tlv_hdr);
1807 uint32_t l3_hdr_offset, l4_hdr_offset, l2_l3_hdr_len;
1808 uint32_t cumulative_ip_len_delta = hal_cumulative_ip_len -
1809 fisa_flow->hal_cumultive_ip_len;
1810
1811 hal_rx_get_l3_l4_offsets(fisa_flow->dp_ctx->hal_soc, rx_tlv_hdr,
1812 &l3_hdr_offset, &l4_hdr_offset);
1813
1814 l2_l3_hdr_len = l3_hdr_offset + l4_hdr_offset;
1815
1816	/**
1817	 * The kernel network stack can panic if a UDP payload shorter than
1818	 * 12 bytes gets aggregated; no solid root cause yet, so as a SW WAR
1819	 * only allow UDP aggregation if the UDP data length is >= 16 bytes.
1820	 *
1821	 * The current cumulative ip length must be greater than the last
1822	 * cumulative_ip_len and at most last cumulative_ip_len + 1478, the
1823	 * current aggregate count must equal the last aggregate count + 1,
1824	 * and the cumulative_ip_len delta must equal the current msdu
1825	 * length minus the L2/L3 header length;
1826	 * otherwise, the current fisa flow aggregation is stopped.
1827	 */
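	/*
	 * Illustrative example (numbers are hypothetical): with
	 * hal_cumultive_ip_len at 1470 and last_hal_aggr_count at 1, a valid
	 * next MSDU of msdu_len 1528 with an l2_l3_hdr_len of 58 must report
	 * a cumulative IP length of 1470 + (1528 - 58) = 2940 and an
	 * aggregate count of 2; any other combination stops aggregation.
	 */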
1828 if (fisa_flow->do_not_aggregate ||
1829 msdu_len < (l2_l3_hdr_len + FISA_MIN_L4_AND_DATA_LEN) ||
1830 hal_cumulative_ip_len <= fisa_flow->hal_cumultive_ip_len ||
1831 cumulative_ip_len_delta > FISA_MAX_SINGLE_CUMULATIVE_IP_LEN ||
1832 (fisa_flow->last_hal_aggr_count + 1) != hal_aggr_count ||
1833 cumulative_ip_len_delta != (msdu_len - l2_l3_hdr_len))
1834 return true;
1835
1836 return false;
1837 }
1838
1839 /**
1840 * dp_add_nbuf_to_fisa_flow() - Aggregate incoming nbuf
1841 * @fisa_hdl: handle to fisa context
1842 * @vdev: handle DP vdev
1843 * @nbuf: Incoming nbuf
1844 * @fisa_flow: Handle SW flow entry
1845 *
1846 * Return: Success on aggregation
1847 */
1848 static int dp_add_nbuf_to_fisa_flow(struct dp_rx_fst *fisa_hdl,
1849 struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1850 struct dp_fisa_rx_sw_ft *fisa_flow)
1851 {
1852 bool flow_aggr_cont;
1853 uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);
1854 uint16_t hal_cumulative_ip_len;
1855 hal_soc_handle_t hal_soc_hdl = fisa_hdl->dp_ctx->hal_soc;
1856 uint32_t hal_aggr_count;
1857 uint8_t napi_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
1858 uint32_t fse_metadata;
1859 bool cce_match;
1860
1861 dump_tlvs(hal_soc_hdl, rx_tlv_hdr, QDF_TRACE_LEVEL_INFO_HIGH);
1862 dp_fisa_debug("nbuf: %pK nbuf->next:%pK nbuf->data:%pK len %d data_len %d",
1863 nbuf, qdf_nbuf_next(nbuf), qdf_nbuf_data(nbuf),
1864 qdf_nbuf_len(nbuf), qdf_nbuf_get_only_data_len(nbuf));
1865
1866 /* Packets of the flow are arriving on a different REO than
1867 * the one configured.
1868 */
1869 if (qdf_unlikely(fisa_flow->napi_id != napi_id)) {
1870 fse_metadata =
1871 hal_rx_msdu_fse_metadata_get(hal_soc_hdl, rx_tlv_hdr);
1872 cce_match = hal_rx_msdu_cce_match_get(hal_soc_hdl, rx_tlv_hdr);
1873 /*
1874 * For two cases the fse_metadata will not match the metadata
1875 * from the fisa_flow_table entry
1876 * 1) Flow has been evicted (lru deletion), and this packet is
1877 * one of the few packets pending in the rx ring from the prev
1878 * flow
1879 * 2) HW flow table match fails for some packets in the
1880 * currently active flow.
1881 */
1882 if (cce_match) {
1883 DP_STATS_INC(fisa_hdl, reo_mismatch.allow_cce_match,
1884 1);
1885 return FISA_AGGR_NOT_ELIGIBLE;
1886 }
1887
1888 if (fse_metadata != fisa_flow->metadata) {
1889 DP_STATS_INC(fisa_hdl,
1890 reo_mismatch.allow_fse_metdata_mismatch,
1891 1);
1892 return FISA_AGGR_NOT_ELIGIBLE;
1893 }
1894
1895 dp_err("REO id mismatch flow: %pK napi_id: %u nbuf: %pK reo_id: %u",
1896 fisa_flow, fisa_flow->napi_id, nbuf, napi_id);
1897 DP_STATS_INC(fisa_hdl, reo_mismatch.allow_non_aggr, 1);
1898 QDF_BUG(0);
1899 return FISA_AGGR_NOT_ELIGIBLE;
1900 }
1901
1902 hal_cumulative_ip_len = hal_rx_get_fisa_cumulative_ip_length(
1903 hal_soc_hdl,
1904 rx_tlv_hdr);
1905 flow_aggr_cont = hal_rx_get_fisa_flow_agg_continuation(hal_soc_hdl,
1906 rx_tlv_hdr);
1907 hal_aggr_count = hal_rx_get_fisa_flow_agg_count(hal_soc_hdl,
1908 rx_tlv_hdr);
1909
1910 if (!flow_aggr_cont) {
1911 /* Start of new aggregation for the flow
1912 * Flush previous aggregates for this flow
1913 */
1914 dp_fisa_debug("no fgc nbuf %pK, flush %pK napi %d", nbuf,
1915 fisa_flow, QDF_NBUF_CB_RX_CTX_ID(nbuf));
1916 dp_rx_fisa_flush_flow(vdev, fisa_flow);
1917		/* Clear previous context values */
1918 fisa_flow->napi_flush_cumulative_l4_checksum = 0;
1919 fisa_flow->napi_flush_cumulative_ip_length = 0;
1920 fisa_flow->cur_aggr = 0;
1921 fisa_flow->do_not_aggregate = false;
1922 fisa_flow->hal_cumultive_ip_len = 0;
1923 fisa_flow->last_hal_aggr_count = 0;
1924		/* Check whether the FISA-related HW TLVs are correct */
1925 if (qdf_unlikely(dp_fisa_aggregation_should_stop(
1926 fisa_flow,
1927 hal_aggr_count,
1928 hal_cumulative_ip_len,
1929 rx_tlv_hdr))) {
1930 qdf_assert(0);
1931 fisa_flow->do_not_aggregate = true;
1932 /*
1933 * do not aggregate until next new aggregation
1934 * start.
1935 */
1936 goto invalid_fisa_assist;
1937 }
1938 } else if (qdf_unlikely(dp_fisa_aggregation_should_stop(
1939 fisa_flow,
1940 hal_aggr_count,
1941 hal_cumulative_ip_len,
1942 rx_tlv_hdr))) {
1943 qdf_assert(0);
1944		/* Either the HW cumulative ip length is wrong or a packet was
1945		 * missed. Flush the flow and do not aggregate until the next
1946		 * new aggregation starts.
1947		 */
1948 dp_rx_fisa_flush_flow(vdev, fisa_flow);
1949 fisa_flow->do_not_aggregate = true;
1950 fisa_flow->cur_aggr = 0;
1951 fisa_flow->napi_flush_cumulative_ip_length = 0;
1952 goto invalid_fisa_assist;
1953 } else {
1954		/* Take care to skip the UDP header len for the subsequent
1955		 * cumulative length
1956		 */
1957 fisa_flow->cur_aggr++;
1958 }
1959
1960 dp_fisa_debug("nbuf %pK cumulat_ip_length %d flow %pK fl aggr cont %d",
1961 nbuf, hal_cumulative_ip_len, fisa_flow, flow_aggr_cont);
1962
1963 fisa_flow->aggr_count++;
1964 fisa_flow->last_hal_aggr_count = hal_aggr_count;
1965 fisa_flow->hal_cumultive_ip_len = hal_cumulative_ip_len;
1966
1967 if (!fisa_flow->head_skb) {
1968		/* This is the start of aggregation for the flow, save the offsets */
1969 fisa_flow->napi_flush_cumulative_l4_checksum = 0;
1970 fisa_flow->cur_aggr = 0;
1971 }
1972
1973 fisa_flow->adjusted_cumulative_ip_length =
1974		/* The cumulative ip len counts the UDP header of every aggregated
1975		 * msdu, while the aggregated msdu should carry a single UDP header
1976		 */
1977 (hal_cumulative_ip_len -
1978 (fisa_flow->cur_aggr * sizeof(qdf_net_udphdr_t))) -
1979 fisa_flow->napi_flush_cumulative_ip_length;
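	/*
	 * Worked example (illustrative; assumes the HW cumulative IP length
	 * counts UDP header + payload per MSDU and no earlier NAPI flush):
	 * three aggregated MSDUs with 1472-byte payloads give
	 * hal_cumulative_ip_len = 3 * 1480 = 4440 and cur_aggr = 2, so the
	 * adjusted length is 4440 - 2 * 8 = 4424, i.e. one UDP header plus
	 * all payloads of the single datagram delivered at flush time.
	 */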
1980
1981 /**
1982 * cur_aggr does not include the head_skb, so compare with
1983 * FISA_FLOW_MAX_AGGR_COUNT - 1.
1984 */
1985 if (fisa_flow->cur_aggr > (FISA_FLOW_MAX_AGGR_COUNT - 1))
1986 dp_err("HAL cumulative_ip_length %d", hal_cumulative_ip_len);
1987
1988 dp_fisa_debug("hal cum_len 0x%x - napI_cumu_len 0x%x = flow_cum_len 0x%x cur_aggr %d",
1989 hal_cumulative_ip_len,
1990 fisa_flow->napi_flush_cumulative_ip_length,
1991 fisa_flow->adjusted_cumulative_ip_length,
1992 fisa_flow->cur_aggr);
1993
1994 if (fisa_flow->adjusted_cumulative_ip_length >
1995 FISA_FLOW_MAX_CUMULATIVE_IP_LEN) {
1996 dp_err("fisa_flow %pK nbuf %pK", fisa_flow, nbuf);
1997 dp_err("fisa_flow->adjusted_cumulative_ip_length %d",
1998 fisa_flow->adjusted_cumulative_ip_length);
1999 dp_err("HAL cumulative_ip_length %d", hal_cumulative_ip_len);
2000 dp_err("napi_flush_cumulative_ip_length %d",
2001 fisa_flow->napi_flush_cumulative_ip_length);
2002 qdf_assert(0);
2003 }
2004
2005 dp_fisa_record_pkt(fisa_flow, nbuf, rx_tlv_hdr,
2006 fisa_hdl->rx_pkt_tlv_size);
2007
2008 if (fisa_flow->is_flow_udp) {
2009 dp_rx_fisa_aggr_udp(fisa_hdl, fisa_flow, nbuf);
2010 } else if (fisa_flow->is_flow_tcp) {
2011 qdf_assert(0);
2012 dp_rx_fisa_aggr_tcp(fisa_hdl, fisa_flow, nbuf);
2013 }
2014
2015 fisa_flow->last_accessed_ts = qdf_get_log_timestamp();
2016
2017 return FISA_AGGR_DONE;
2018
2019 invalid_fisa_assist:
2020	/* Not eligible for aggregation; deliver the frame without FISA */
2021 return FISA_AGGR_NOT_ELIGIBLE;
2022 }
2023
2024 /**
2025 * dp_is_nbuf_bypass_fisa() - FISA bypass check for RX frame
2026 * @nbuf: RX nbuf pointer
2027 *
2028 * Return: true if FISA should be bypassed else false
2029 */
2030 static bool dp_is_nbuf_bypass_fisa(qdf_nbuf_t nbuf)
2031 {
2032	/* TCP, exception-path, DHCP and multicast/broadcast frames bypass FISA */
2033 if (QDF_NBUF_CB_RX_TCP_PROTO(nbuf) ||
2034 qdf_nbuf_is_exc_frame(nbuf) ||
2035 qdf_nbuf_is_ipv4_dhcp_pkt(nbuf) ||
2036 qdf_nbuf_is_da_mcbc(nbuf))
2037 return true;
2038
2039 return false;
2040 }
2041
2042 /**
2043 * dp_rx_fisa_flush_by_intf_ctx_id() - Flush fisa aggregates per dp_interface
2044 * and rx context id
2045 * @dp_intf: DP interface handle
2046 * @rx_ctx_id: Rx context id
2047 *
2048 * Return: Success on flushing the flows for the dp_intf and rx ctx id
2049 */
2050 static
2051 QDF_STATUS dp_rx_fisa_flush_by_intf_ctx_id(struct wlan_dp_intf *dp_intf,
2052 uint8_t rx_ctx_id)
2053 {
2054 struct wlan_dp_psoc_context *dp_ctx = dp_get_context();
2055 struct dp_rx_fst *fisa_hdl = dp_ctx->rx_fst;
2056 struct dp_fisa_rx_sw_ft *sw_ft_entry =
2057 (struct dp_fisa_rx_sw_ft *)fisa_hdl->base;
2058 int ft_size = fisa_hdl->max_entries;
2059 int i;
2060
2061 dp_rx_fisa_acquire_ft_lock(fisa_hdl, rx_ctx_id);
2062 for (i = 0; i < ft_size; i++) {
2063 if (sw_ft_entry[i].is_populated &&
2064 dp_intf == sw_ft_entry[i].dp_intf &&
2065 sw_ft_entry[i].napi_id == rx_ctx_id) {
2066			dp_fisa_debug("flushing %d %pK dp_intf %pK napi id:%d",
2067 i, &sw_ft_entry[i], dp_intf, rx_ctx_id);
2068 dp_rx_fisa_flush_flow_wrap(&sw_ft_entry[i]);
2069 }
2070 }
2071 dp_rx_fisa_release_ft_lock(fisa_hdl, rx_ctx_id);
2072
2073 return QDF_STATUS_SUCCESS;
2074 }
2075
2076 /**
2077 * dp_fisa_disallowed_for_vdev() - Check if fisa is allowed on vdev
2078 * @soc: core txrx main context
2079 * @vdev: Handle DP vdev
2080 * @rx_ctx_id: Rx context id
2081 *
2082 * Return: true if fisa is disallowed for vdev else false
2083 */
2084 static bool dp_fisa_disallowed_for_vdev(struct dp_soc *soc,
2085 struct dp_vdev *vdev,
2086 uint8_t rx_ctx_id)
2087 {
2088 struct wlan_dp_intf *dp_intf;
2089
2090 dp_intf = dp_fisa_rx_get_dp_intf_for_vdev(vdev);
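	/*
	 * fisa_disallowed[] is a per-RX-context flag (presumably toggled from
	 * the control path); when it is set, flush the pending aggregates for
	 * this context once and latch fisa_force_flushed[], and clear the
	 * latch when FISA becomes allowed again, so the flush does not run
	 * for every packet.
	 */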
2091 if (!dp_intf->fisa_disallowed[rx_ctx_id]) {
2092 if (dp_intf->fisa_force_flushed[rx_ctx_id])
2093 dp_intf->fisa_force_flushed[rx_ctx_id] = 0;
2094 return false;
2095 }
2096
2097 if (!dp_intf->fisa_force_flushed[rx_ctx_id]) {
2098 dp_rx_fisa_flush_by_intf_ctx_id(dp_intf, rx_ctx_id);
2099 dp_intf->fisa_force_flushed[rx_ctx_id] = 1;
2100 }
2101
2102 return true;
2103 }
2104
2105 QDF_STATUS dp_fisa_rx(struct wlan_dp_psoc_context *dp_ctx,
2106 struct dp_vdev *vdev,
2107 qdf_nbuf_t nbuf_list)
2108 {
2109 struct dp_soc *soc = cdp_soc_t_to_dp_soc(dp_ctx->cdp_soc);
2110 struct dp_rx_fst *dp_fisa_rx_hdl = dp_ctx->rx_fst;
2111 qdf_nbuf_t head_nbuf;
2112 qdf_nbuf_t next_nbuf;
2113 struct dp_fisa_rx_sw_ft *fisa_flow;
2114 int fisa_ret;
2115 uint8_t rx_ctx_id = QDF_NBUF_CB_RX_CTX_ID(nbuf_list);
2116 uint32_t tlv_reo_dest_ind;
2117 uint8_t reo_id;
2118
2119 head_nbuf = nbuf_list;
2120
2121 while (head_nbuf) {
2122 next_nbuf = head_nbuf->next;
2123 qdf_nbuf_set_next(head_nbuf, NULL);
2124
2125 /* bypass FISA check */
2126 if (dp_is_nbuf_bypass_fisa(head_nbuf))
2127 goto deliver_nbuf;
2128
2129 if (dp_fisa_disallowed_for_vdev(soc, vdev, rx_ctx_id))
2130 goto deliver_nbuf;
2131
2132 if (qdf_atomic_read(&dp_ctx->skip_fisa_param.skip_fisa)) {
2133 if (!dp_ctx->skip_fisa_param.fisa_force_flush[rx_ctx_id]) {
2134 dp_rx_fisa_flush_by_ctx_id(soc, rx_ctx_id);
2135 dp_ctx->skip_fisa_param.
2136 fisa_force_flush[rx_ctx_id] = 1;
2137 }
2138 goto deliver_nbuf;
2139 } else if (dp_ctx->skip_fisa_param.fisa_force_flush[rx_ctx_id]) {
2140 dp_ctx->skip_fisa_param.fisa_force_flush[rx_ctx_id] = 0;
2141 }
2142
2143 qdf_nbuf_push_head(head_nbuf, dp_fisa_rx_hdl->rx_pkt_tlv_size +
2144 QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(head_nbuf));
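		/*
		 * The data pointer is pushed back over the RX PKT TLVs (plus
		 * the L3 header pad) so the TLV fields (REO destination
		 * indication, FSE metadata, cumulative lengths) can be parsed
		 * below; wlan_dp_nbuf_skip_rx_pkt_tlv() pulls them off again
		 * before a frame is delivered without FISA.
		 */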
2145
2146 hal_rx_msdu_get_reo_destination_indication(dp_ctx->hal_soc,
2147 (uint8_t *)qdf_nbuf_data(head_nbuf),
2148 &tlv_reo_dest_ind);
2149
2150 /* Skip FISA aggregation and drop the frame if RDI is REO2TCL. */
2151 if (qdf_unlikely(tlv_reo_dest_ind == REO_REMAP_TCL)) {
2152 qdf_nbuf_free(head_nbuf);
2153 head_nbuf = next_nbuf;
2154 DP_STATS_INC(dp_fisa_rx_hdl, incorrect_rdi, 1);
2155 continue;
2156 }
2157
2158 reo_id = QDF_NBUF_CB_RX_CTX_ID(head_nbuf);
2159 dp_rx_fisa_acquire_ft_lock(dp_fisa_rx_hdl, reo_id);
2160
2161		/* Look up the flow; add a new flow if there is no ongoing one */
2162 fisa_flow = dp_rx_get_fisa_flow(dp_fisa_rx_hdl, vdev,
2163 head_nbuf);
2164
2165 /* Do not FISA aggregate IPSec packets */
2166 if (fisa_flow &&
2167 fisa_flow->rx_flow_tuple_info.is_exception) {
2168 dp_rx_fisa_release_ft_lock(dp_fisa_rx_hdl, reo_id);
2169 goto pull_nbuf;
2170 }
2171
2172		/* Fragmented skbs are not handled via FISA; flush the ongoing
2173		 * flow, if any, and deliver this frame without FISA
2174		 */
2175 if (qdf_unlikely(qdf_nbuf_get_ext_list(head_nbuf))) {
2176 dp_fisa_debug("Fragmented skb, will not be FISAed");
2177 if (fisa_flow)
2178 dp_rx_fisa_flush_flow(vdev, fisa_flow);
2179
2180 dp_rx_fisa_release_ft_lock(dp_fisa_rx_hdl, reo_id);
2181 goto pull_nbuf;
2182 }
2183
2184 if (!fisa_flow) {
2185 dp_rx_fisa_release_ft_lock(dp_fisa_rx_hdl, reo_id);
2186 goto pull_nbuf;
2187 }
2188
2189 fisa_ret = dp_add_nbuf_to_fisa_flow(dp_fisa_rx_hdl, vdev,
2190 head_nbuf, fisa_flow);
2191
2192 dp_rx_fisa_release_ft_lock(dp_fisa_rx_hdl, reo_id);
2193
2194 if (fisa_ret == FISA_AGGR_DONE)
2195 goto next_msdu;
2196
2197 pull_nbuf:
2198 wlan_dp_nbuf_skip_rx_pkt_tlv(dp_ctx, dp_fisa_rx_hdl, head_nbuf);
2199
2200 deliver_nbuf: /* Deliver without FISA */
2201 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head_nbuf) = 1;
2202 qdf_nbuf_set_next(head_nbuf, NULL);
2203 hex_dump_skb_data(head_nbuf, false);
2204 if (!vdev->osif_rx || QDF_STATUS_SUCCESS !=
2205 vdev->osif_rx(vdev->osif_vdev, head_nbuf))
2206 qdf_nbuf_free(head_nbuf);
2207 next_msdu:
2208 head_nbuf = next_nbuf;
2209 }
2210
2211 return QDF_STATUS_SUCCESS;
2212 }
2213
2214 /**
2215 * dp_rx_fisa_flush_flow_wrap() - flush fisa flow by invoking
2216 * dp_rx_fisa_flush_flow()
2217 * @sw_ft: fisa flow for which aggregates are to be flushed
2218 *
2219 * Return: None.
2220 */
2221 static void dp_rx_fisa_flush_flow_wrap(struct dp_fisa_rx_sw_ft *sw_ft)
2222 {
2223	/* Save the ip_len and checksum, as the hardware assist is
2224	 * always based on its start of aggregation
2225	 */
2226 sw_ft->napi_flush_cumulative_l4_checksum =
2227 sw_ft->cumulative_l4_checksum;
2228 sw_ft->napi_flush_cumulative_ip_length =
2229 sw_ft->hal_cumultive_ip_len;
2230 dp_fisa_debug("napi_flush_cumulative_ip_length 0x%x",
2231 sw_ft->napi_flush_cumulative_ip_length);
2232
2233 dp_rx_fisa_flush_flow(sw_ft->vdev,
2234 sw_ft);
2235 sw_ft->cur_aggr = 0;
2236 }
2237
2238 QDF_STATUS dp_rx_fisa_flush_by_ctx_id(struct dp_soc *soc, int napi_id)
2239 {
2240 struct wlan_dp_psoc_context *dp_ctx = dp_get_context();
2241 struct dp_rx_fst *fisa_hdl = dp_ctx->rx_fst;
2242 struct dp_fisa_rx_sw_ft *sw_ft_entry =
2243 (struct dp_fisa_rx_sw_ft *)fisa_hdl->base;
2244 int ft_size = fisa_hdl->max_entries;
2245 int i;
2246
2247 dp_rx_fisa_acquire_ft_lock(fisa_hdl, napi_id);
2248 for (i = 0; i < ft_size; i++) {
2249 if (sw_ft_entry[i].napi_id == napi_id &&
2250 sw_ft_entry[i].is_populated) {
2251 dp_fisa_debug("flushing %d %pK napi_id %d", i,
2252 &sw_ft_entry[i], napi_id);
2253 dp_rx_fisa_flush_flow_wrap(&sw_ft_entry[i]);
2254 }
2255 }
2256 dp_rx_fisa_release_ft_lock(fisa_hdl, napi_id);
2257
2258 return QDF_STATUS_SUCCESS;
2259 }
2260
2261 QDF_STATUS dp_rx_fisa_flush_by_vdev_id(struct dp_soc *soc, uint8_t vdev_id)
2262 {
2263 struct wlan_dp_psoc_context *dp_ctx = dp_get_context();
2264 struct dp_rx_fst *fisa_hdl = dp_ctx->rx_fst;
2265 struct dp_fisa_rx_sw_ft *sw_ft_entry =
2266 (struct dp_fisa_rx_sw_ft *)fisa_hdl->base;
2267 int ft_size = fisa_hdl->max_entries;
2268 int i;
2269 struct dp_vdev *vdev;
2270 uint8_t reo_id;
2271
2272 vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_RX);
2273 if (qdf_unlikely(!vdev)) {
2274 dp_err("null vdev by vdev_id %d", vdev_id);
2275 return QDF_STATUS_E_FAILURE;
2276 }
2277
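	/*
	 * Flow table entries may belong to different REO rings, so each
	 * entry is flushed under its owning ring's lock instead of holding
	 * a single lock across the whole table walk.
	 */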
2278 for (i = 0; i < ft_size; i++) {
2279 reo_id = sw_ft_entry[i].napi_id;
2280 if (reo_id >= MAX_REO_DEST_RINGS)
2281 continue;
2282 dp_rx_fisa_acquire_ft_lock(fisa_hdl, reo_id);
2283 if (vdev == sw_ft_entry[i].vdev) {
2284			dp_fisa_debug("flushing %d %pK vdev %pK", i,
2285 &sw_ft_entry[i], vdev);
2286
2287 dp_rx_fisa_flush_flow_wrap(&sw_ft_entry[i]);
2288 }
2289 dp_rx_fisa_release_ft_lock(fisa_hdl, reo_id);
2290 }
2291 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
2292
2293 return QDF_STATUS_SUCCESS;
2294 }
2295
2296 void dp_suspend_fse_cache_flush(struct wlan_dp_psoc_context *dp_ctx)
2297 {
2298 struct dp_rx_fst *dp_fst;
2299
2300 dp_fst = dp_ctx->rx_fst;
2301 if (dp_fst) {
2302 if (qdf_atomic_read(&dp_fst->fse_cache_flush_posted))
2303 qdf_timer_sync_cancel(&dp_fst->fse_cache_flush_timer);
2304 dp_fst->fse_cache_flush_allow = false;
2305 }
2306
2307 dp_info("fse cache flush suspended");
2308 }
2309
2310 void dp_resume_fse_cache_flush(struct wlan_dp_psoc_context *dp_ctx)
2311 {
2312 struct dp_rx_fst *dp_fst;
2313
2314 dp_fst = dp_ctx->rx_fst;
2315 if (dp_fst) {
2316 qdf_atomic_set(&dp_fst->fse_cache_flush_posted, 0);
2317 dp_fst->fse_cache_flush_allow = true;
2318 }
2319
2320 dp_info("fse cache flush resumed");
2321 }
2322
2323 void dp_set_fisa_dynamic_aggr_size_support(bool dynamic_aggr_size_support)
2324 {
2325 struct wlan_dp_psoc_context *dp_ctx = dp_get_context();
2326
2327 dp_ctx->fisa_dynamic_aggr_size_support = dynamic_aggr_size_support;
2328 }
2329