/*
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "qdf_module.h"
#include "dp_types.h"
#include "hal_rx_flow.h"
#include "qdf_ssr_driver_dump.h"

/**
 * hal_rx_flow_get_cmem_fse() - Get FSE from CMEM
 * @hal_soc_hdl: HAL SOC handle
 * @fse_offset: CMEM FSE offset
 * @fse: reference where FSE will be copied
 * @len: length of FSE
 *
 * Return: None
 */
static void
hal_rx_flow_get_cmem_fse(hal_soc_handle_t hal_soc_hdl, uint32_t fse_offset,
			 uint32_t *fse, qdf_size_t len)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	if (hal_soc->ops->hal_rx_flow_get_cmem_fse) {
		return hal_soc->ops->hal_rx_flow_get_cmem_fse(
				hal_soc, fse_offset, fse, len);
	}
}

#if defined(WLAN_SUPPORT_RX_FISA)
static inline void hal_rx_dump_fse(struct rx_flow_search_entry *fse, int index)
{
	dp_info("index %d:"
		" src_ip_127_96 0x%x"
		" src_ip_95_64 0x%x"
		" src_ip_63_32 0x%x"
		" src_ip_31_0 0x%x"
		" dest_ip_127_96 0x%x"
		" dest_ip_95_64 0x%x"
		" dest_ip_63_32 0x%x"
		" dest_ip_31_0 0x%x"
		" src_port 0x%x"
		" dest_port 0x%x"
		" l4_protocol 0x%x"
		" valid 0x%x"
		" reo_destination_indication 0x%x"
		" msdu_drop 0x%x"
		" reo_destination_handler 0x%x"
		" metadata 0x%x"
		" aggregation_count 0x%x"
		" lro_eligible 0x%x"
		" msdu_count 0x%x"
		" msdu_byte_count 0x%x"
		" timestamp 0x%x"
		" cumulative_l4_checksum 0x%x"
		" cumulative_ip_length 0x%x"
		" tcp_sequence_number 0x%x",
		index,
		fse->src_ip_127_96,
		fse->src_ip_95_64,
		fse->src_ip_63_32,
		fse->src_ip_31_0,
		fse->dest_ip_127_96,
		fse->dest_ip_95_64,
		fse->dest_ip_63_32,
		fse->dest_ip_31_0,
		fse->src_port,
		fse->dest_port,
		fse->l4_protocol,
		fse->valid,
		fse->reo_destination_indication,
		fse->msdu_drop,
		fse->reo_destination_handler,
		fse->metadata,
		fse->aggregation_count,
		fse->lro_eligible,
		fse->msdu_count,
		fse->msdu_byte_count,
		fse->timestamp,
#ifdef QCA_WIFI_KIWI_V2
		fse->cumulative_ip_length_pmac1,
#else
		fse->cumulative_l4_checksum,
#endif
		fse->cumulative_ip_length,
		fse->tcp_sequence_number);
}

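/**
 * hal_rx_dump_fse_table() - Dump all valid entries in the Rx FST
 * @fst: Pointer to the Rx Flow Search Table
 *
 * Return: None
 */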
void hal_rx_dump_fse_table(struct hal_rx_fst *fst)
{
	int i = 0;
	struct rx_flow_search_entry *fse =
		(struct rx_flow_search_entry *)fst->base_vaddr;

	dp_info("Number flow table entries %d", fst->add_flow_count);
	for (i = 0; i < fst->max_entries; i++) {
		if (fse[i].valid)
			hal_rx_dump_fse(&fse[i], i);
	}
}

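/**
 * hal_rx_dump_cmem_fse() - Read an FSE from CMEM and dump it if valid
 * @hal_soc_hdl: HAL SOC handle
 * @fse_offset: CMEM offset of the FSE
 * @index: index of the FSE, used only for logging
 *
 * Return: None
 */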
void hal_rx_dump_cmem_fse(hal_soc_handle_t hal_soc_hdl, uint32_t fse_offset,
			  int index)
{
	struct rx_flow_search_entry fse = {0};

	if (!fse_offset)
		return;

	hal_rx_flow_get_cmem_fse(hal_soc_hdl, fse_offset, (uint32_t *)&fse,
				 sizeof(struct rx_flow_search_entry));
	if (fse.valid)
		hal_rx_dump_fse(&fse, index);
}
#else
void hal_rx_dump_fse_table(struct hal_rx_fst *fst)
{
}

void hal_rx_dump_cmem_fse(hal_soc_handle_t hal_soc_hdl, uint32_t fse_offset,
			  int index)
{
}
#endif

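/**
 * hal_rx_flow_setup_fse() - Setup a flow search entry in the HW FST
 * @hal_soc_hdl: HAL SOC handle
 * @fst: Pointer to the Rx Flow Search Table
 * @table_offset: offset of the entry within the table
 * @flow: flow parameters to be programmed into the entry
 *
 * Return: Pointer to the programmed flow search entry, or NULL if the
 *         operation is not supported
 */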
void *
hal_rx_flow_setup_fse(hal_soc_handle_t hal_soc_hdl,
		      struct hal_rx_fst *fst, uint32_t table_offset,
		      struct hal_rx_flow *flow)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	if (hal_soc->ops->hal_rx_flow_setup_fse) {
		return hal_soc->ops->hal_rx_flow_setup_fse((uint8_t *)fst,
							   table_offset,
							   (uint8_t *)flow);
	}

	return NULL;
}
qdf_export_symbol(hal_rx_flow_setup_fse);

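/**
 * hal_rx_flow_setup_cmem_fse() - Setup a flow search entry in the CMEM FST
 * @hal_soc_hdl: HAL SOC handle
 * @cmem_ba: CMEM base address of the FST
 * @table_offset: offset of the entry within the table
 * @flow: flow parameters to be programmed into the entry
 *
 * Return: CMEM offset of the programmed flow search entry, or 0 if the
 *         operation is not supported
 */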
uint32_t
hal_rx_flow_setup_cmem_fse(hal_soc_handle_t hal_soc_hdl, uint32_t cmem_ba,
			   uint32_t table_offset, struct hal_rx_flow *flow)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	if (hal_soc->ops->hal_rx_flow_setup_cmem_fse) {
		return hal_soc->ops->hal_rx_flow_setup_cmem_fse(
					hal_soc, cmem_ba,
					table_offset, (uint8_t *)flow);
	}

	return 0;
}
qdf_export_symbol(hal_rx_flow_setup_cmem_fse);

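/**
 * hal_rx_flow_get_cmem_fse_timestamp() - Get the timestamp field of a CMEM FSE
 * @hal_soc_hdl: HAL SOC handle
 * @fse_offset: CMEM offset of the FSE
 *
 * Return: Timestamp of the flow, or 0 if the operation is not supported
 */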
uint32_t hal_rx_flow_get_cmem_fse_timestamp(hal_soc_handle_t hal_soc_hdl,
					    uint32_t fse_offset)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	if (hal_soc->ops->hal_rx_flow_get_cmem_fse_ts) {
		return hal_soc->ops->hal_rx_flow_get_cmem_fse_ts(hal_soc,
								 fse_offset);
	}

	return 0;
}
qdf_export_symbol(hal_rx_flow_get_cmem_fse_timestamp);

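/**
 * hal_rx_flow_delete_entry() - Delete a flow search entry from the HW FST
 * @hal_soc_hdl: HAL SOC handle
 * @fst: Pointer to the Rx Flow Search Table
 * @hal_rx_fse: Pointer to the flow search entry to be deleted
 *
 * Return: QDF_STATUS of the delete operation, QDF_STATUS_E_NOSUPPORT if
 *         the operation is not supported
 */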
QDF_STATUS
hal_rx_flow_delete_entry(hal_soc_handle_t hal_soc_hdl,
			 struct hal_rx_fst *fst, void *hal_rx_fse)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	if (hal_soc->ops->hal_rx_flow_delete_entry) {
		return hal_soc->ops->hal_rx_flow_delete_entry((uint8_t *)fst,
							      hal_rx_fse);
	}

	return QDF_STATUS_E_NOSUPPORT;
}

qdf_export_symbol(hal_rx_flow_delete_entry);

#ifndef WLAN_SUPPORT_RX_FISA
/**
 * hal_rx_fst_key_configure() - Configure the Toeplitz key in the FST
 * @fst: Pointer to the Rx Flow Search Table
 *
 * Return: None
 */
static void hal_rx_fst_key_configure(struct hal_rx_fst *fst)
{
	uint8_t key_bytes[HAL_FST_HASH_KEY_SIZE_BYTES];

	qdf_mem_copy(key_bytes, fst->key, HAL_FST_HASH_KEY_SIZE_BYTES);

	/*
	 * The Toeplitz algorithm as per the Microsoft spec works in a
	 * "big-endian" manner: the MSBs of the key hash the initial bytes
	 * of the input, progressively lower-order key bits hash further
	 * input bytes, and the LSBs of the key are used last.
	 *
	 * So first, shift the 320-bit input key by 5 bits to get the
	 * 315 MS bits.
	 */
	key_bitwise_shift_left(key_bytes, HAL_FST_HASH_KEY_SIZE_BYTES, 5);
	key_reverse(fst->shifted_key, key_bytes, HAL_FST_HASH_KEY_SIZE_BYTES);
}
#else
static void hal_rx_fst_key_configure(struct hal_rx_fst *fst)
{
}
#endif

/**
 * hal_rx_fst_get_base() - Retrieve the virtual base address of the Rx FST
 * @fst: Pointer to the Rx Flow Search Table
 *
 * Return: Virtual base address of the Rx FST
 */
static inline void *hal_rx_fst_get_base(struct hal_rx_fst *fst)
{
	return fst->base_vaddr;
}

/**
 * hal_rx_fst_get_fse_size() - Retrieve the size of each entry(flow) in Rx FST
 * @hal_soc_hdl: HAL SOC handle
 *
 * Return: size of each entry/flow in Rx FST
 */
static inline uint32_t
hal_rx_fst_get_fse_size(hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	if (hal_soc->ops->hal_rx_fst_get_fse_size)
		return hal_soc->ops->hal_rx_fst_get_fse_size();

	return 0;
}

/**
 * hal_rx_flow_get_tuple_info() - Get a flow search entry from the HW FST
 * @hal_soc_hdl: HAL SOC handle
 * @fst: Pointer to the Rx Flow Search Table
 * @hal_hash: HAL 5-tuple hash
 * @tuple_info: 5-tuple info of the flow returned to the caller
 *
 * Return: Pointer to the flow search entry, or NULL if the entry is invalid
 *         or the operation is not supported
 */
void *
hal_rx_flow_get_tuple_info(hal_soc_handle_t hal_soc_hdl,
			   struct hal_rx_fst *fst,
			   uint32_t hal_hash,
			   struct hal_flow_tuple_info *tuple_info)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	if (hal_soc->ops->hal_rx_flow_get_tuple_info)
		return hal_soc->ops->hal_rx_flow_get_tuple_info(
						(uint8_t *)fst,
						hal_hash,
						(uint8_t *)tuple_info);

	return NULL;
}

#ifndef WLAN_SUPPORT_RX_FISA
/**
 * hal_flow_toeplitz_create_cache() - Calculate hashes for each possible
 * byte value with the key taken as is
 * @fst: FST Handle
 *
 * Return: None
 */
static void hal_flow_toeplitz_create_cache(struct hal_rx_fst *fst)
{
	int bit;
	int val;
	int i;
	uint8_t *key = fst->shifted_key;

	/*
	 * Initialise to the first 32 bits of the key; shift in further key
	 * material through the loop.
	 */
	uint32_t cur_key = (key[0] << 24) | (key[1] << 16) | (key[2] << 8) |
		key[3];

	for (i = 0; i < HAL_FST_HASH_KEY_SIZE_BYTES; i++) {
		uint8_t new_key_byte;
		uint32_t shifted_key[8];

		if (i + 4 < HAL_FST_HASH_KEY_SIZE_BYTES)
			new_key_byte = key[i + 4];
		else
			new_key_byte = 0;

		shifted_key[0] = cur_key;

		for (bit = 1; bit < 8; bit++) {
			/*
			 * For each iteration, shift out one more bit of the
			 * current key and shift in one more bit of the new
			 * key material
			 */
			shifted_key[bit] = cur_key << bit |
				new_key_byte >> (8 - bit);
		}

		for (val = 0; val < (1 << 8); val++) {
			uint32_t hash = 0;
			int mask;

			/*
			 * For each bit set in the input, XOR in
			 * the appropriately shifted key
			 */
			for (bit = 0, mask = 1 << 7; bit < 8; bit++, mask >>= 1)
				if ((val & mask))
					hash ^= shifted_key[bit];

			fst->key_cache[i][val] = hash;
		}

		cur_key = cur_key << 8 | new_key_byte;
	}
}
#else
static void hal_flow_toeplitz_create_cache(struct hal_rx_fst *fst)
{
}
#endif

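/**
 * hal_rx_fst_attach() - Allocate and initialize the Rx Flow Search Table
 * @hal_soc_hdl: HAL SOC handle
 * @qdf_dev: QDF device handle used for DMA-coherent allocation
 * @hal_fst_base_paddr: return argument for the physical base of the table
 * @max_entries: number of flow entries in the table
 * @max_search: maximum skid length used while resolving hash collisions
 * @hash_key: Toeplitz hash key
 * @fst_cmem_base: CMEM base address of the FST; 0 if the table resides in DDR
 *
 * Return: Pointer to the allocated FST, or NULL on failure
 */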
struct hal_rx_fst *
hal_rx_fst_attach(hal_soc_handle_t hal_soc_hdl,
		  qdf_device_t qdf_dev,
		  uint64_t *hal_fst_base_paddr, uint16_t max_entries,
		  uint16_t max_search, uint8_t *hash_key,
		  uint64_t fst_cmem_base)
{
	struct hal_rx_fst *fst = qdf_mem_malloc(sizeof(struct hal_rx_fst));
	uint32_t fst_entry_size;

	if (!fst) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("hal fst allocation failed"));
		return NULL;
	}

	qdf_mem_set(fst, sizeof(struct hal_rx_fst), 0);

	fst->key = hash_key;
	fst->max_skid_length = max_search;
	fst->max_entries = max_entries;
	fst->hash_mask = max_entries - 1;

	fst_entry_size = hal_rx_fst_get_fse_size(hal_soc_hdl);
	fst->fst_entry_size = fst_entry_size;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "HAL FST allocation %pK %d * %d\n", fst,
		  fst->max_entries, fst_entry_size);
	qdf_ssr_driver_dump_register_region("hal_rx_fst", fst, sizeof(*fst));

	if (fst_cmem_base == 0) {
		/* FST is in DDR */
		fst->base_vaddr = (uint8_t *)qdf_mem_alloc_consistent(qdf_dev,
					qdf_dev->dev,
					(fst->max_entries * fst_entry_size),
					&fst->base_paddr);

		if (!fst->base_vaddr) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  FL("hal fst->base_vaddr allocation failed"));
			qdf_mem_free(fst);
			return NULL;
		}
		qdf_ssr_driver_dump_register_region("dp_fisa_hw_fse_table",
						    fst->base_vaddr,
						    (fst->max_entries *
						     fst_entry_size));

		*hal_fst_base_paddr = (uint64_t)fst->base_paddr;
	} else {
		*hal_fst_base_paddr = fst_cmem_base;
		goto out;
	}

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "hal_rx_fst base address 0x%pK", (void *)fst->base_paddr);

	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
			   (void *)fst->key, HAL_FST_HASH_KEY_SIZE_BYTES);

	qdf_mem_set((uint8_t *)fst->base_vaddr,
		    (fst->max_entries * fst_entry_size), 0);

out:
	hal_rx_fst_key_configure(fst);
	hal_flow_toeplitz_create_cache(fst);

	return fst;
}
qdf_export_symbol(hal_rx_fst_attach);

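/**
 * hal_rx_fst_detach() - Free the memory associated with the Rx FST
 * @hal_soc_hdl: HAL SOC handle
 * @rx_fst: Pointer to the Rx Flow Search Table
 * @qdf_dev: QDF device handle
 * @fst_cmem_base: CMEM base address of the FST; 0 if the table resides in DDR
 *
 * Return: None
 */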
void hal_rx_fst_detach(hal_soc_handle_t hal_soc_hdl, struct hal_rx_fst *rx_fst,
		       qdf_device_t qdf_dev, uint64_t fst_cmem_base)
{
	if (!rx_fst || !qdf_dev)
		return;

	qdf_ssr_driver_dump_unregister_region("hal_rx_fst");

	if (fst_cmem_base == 0 && rx_fst->base_vaddr) {
		qdf_ssr_driver_dump_unregister_region("dp_fisa_hw_fse_table");
		qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
					rx_fst->max_entries *
					rx_fst->fst_entry_size,
					rx_fst->base_vaddr, rx_fst->base_paddr,
					0);
	}

	qdf_mem_free(rx_fst);
}
qdf_export_symbol(hal_rx_fst_detach);

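/**
 * hal_flow_toeplitz_hash() - Compute the truncated Toeplitz hash of a flow
 * @hal_fst: Pointer to the Rx Flow Search Table
 * @flow: flow whose 5-tuple is to be hashed
 *
 * Return: Hash value truncated to the FST table size
 */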
#ifndef WLAN_SUPPORT_RX_FISA
uint32_t
hal_flow_toeplitz_hash(void *hal_fst, struct hal_rx_flow *flow)
{
	int i, j;
	uint32_t hash = 0;
	struct hal_rx_fst *fst = (struct hal_rx_fst *)hal_fst;
	uint32_t input[HAL_FST_HASH_KEY_SIZE_WORDS];
	uint8_t *tuple;

	qdf_mem_zero(input, HAL_FST_HASH_KEY_SIZE_BYTES);
	*(uint32_t *)&input[0] = qdf_htonl(flow->tuple_info.src_ip_127_96);
	*(uint32_t *)&input[1] = qdf_htonl(flow->tuple_info.src_ip_95_64);
	*(uint32_t *)&input[2] = qdf_htonl(flow->tuple_info.src_ip_63_32);
	*(uint32_t *)&input[3] = qdf_htonl(flow->tuple_info.src_ip_31_0);
	*(uint32_t *)&input[4] = qdf_htonl(flow->tuple_info.dest_ip_127_96);
	*(uint32_t *)&input[5] = qdf_htonl(flow->tuple_info.dest_ip_95_64);
	*(uint32_t *)&input[6] = qdf_htonl(flow->tuple_info.dest_ip_63_32);
	*(uint32_t *)&input[7] = qdf_htonl(flow->tuple_info.dest_ip_31_0);
	*(uint32_t *)&input[8] = (flow->tuple_info.dest_port << 16) |
				 (flow->tuple_info.src_port);
	*(uint32_t *)&input[9] = flow->tuple_info.l4_protocol;

	tuple = (uint8_t *)input;
	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			   tuple, sizeof(input));
	for (i = 0, j = HAL_FST_HASH_DATA_SIZE - 1;
	     i < HAL_FST_HASH_KEY_SIZE_BYTES && j >= 0; i++, j--) {
		hash ^= fst->key_cache[i][tuple[j]];
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
		  "Hash value %u %u truncated hash %u\n", hash,
		  (hash >> 12), (hash >> 12) % (fst->max_entries));

	hash >>= 12;
	hash &= (fst->max_entries - 1);

	return hash;
}
#else
uint32_t
hal_flow_toeplitz_hash(void *hal_fst, struct hal_rx_flow *flow)
{
	return 0;
}
#endif
qdf_export_symbol(hal_flow_toeplitz_hash);

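/**
 * hal_rx_get_hal_hash() - Truncate a flow hash to the FST table size
 * @hal_fst: Pointer to the Rx Flow Search Table
 * @flow_hash: flow hash to be truncated
 *
 * Return: Truncated hash, usable as an index into the FST
 */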
uint32_t hal_rx_get_hal_hash(struct hal_rx_fst *hal_fst, uint32_t flow_hash)
{
	uint32_t trunc_hash = flow_hash;

	/* Take care of hash wrap around scenario */
	if (flow_hash >= hal_fst->max_entries)
		trunc_hash &= hal_fst->hash_mask;
	return trunc_hash;
}
qdf_export_symbol(hal_rx_get_hal_hash);

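/**
 * hal_rx_insert_flow_entry() - Find a free slot in the HW FST for a new flow
 * @hal_soc: HAL SOC handle
 * @fst: Pointer to the Rx Flow Search Table
 * @flow_hash: Toeplitz hash of the flow
 * @flow_tuple_info: 5-tuple of the flow, used to detect duplicates
 * @flow_idx: return argument for the index of the free slot
 *
 * Return: QDF_STATUS_SUCCESS if a free slot was found,
 *         QDF_STATUS_E_EXISTS if the flow is already present,
 *         QDF_STATUS_E_RANGE if the maximum skid length was reached
 */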
QDF_STATUS
hal_rx_insert_flow_entry(hal_soc_handle_t hal_soc,
			 struct hal_rx_fst *fst, uint32_t flow_hash,
			 void *flow_tuple_info, uint32_t *flow_idx)
{
	int i;
	void *hal_fse = NULL;
	uint32_t hal_hash = 0;
	struct hal_flow_tuple_info hal_tuple_info = { 0 };

	for (i = 0; i < fst->max_skid_length; i++) {
		hal_hash = hal_rx_get_hal_hash(fst, (flow_hash + i));

		hal_fse = hal_rx_flow_get_tuple_info(hal_soc, fst, hal_hash,
						     &hal_tuple_info);
		if (!hal_fse)
			break;

		/* Check for a matching flow entry already in the HW FST */
		if (!qdf_mem_cmp(&hal_tuple_info,
				 flow_tuple_info,
				 sizeof(struct hal_flow_tuple_info))) {
			dp_err("Duplicate flow entry in FST %u at skid %u ",
			       hal_hash, i);
			return QDF_STATUS_E_EXISTS;
		}
	}
	if (i == fst->max_skid_length) {
		dp_err("Max skid length reached for hash %u", flow_hash);
		return QDF_STATUS_E_RANGE;
	}
	*flow_idx = hal_hash;
	dp_info("flow_hash = %u, skid_entry = %d, flow_addr = %pK flow_idx = %d",
		flow_hash, i, hal_fse, *flow_idx);

	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hal_rx_insert_flow_entry);

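/**
 * hal_rx_find_flow_from_tuple() - Find the FST entry matching a 5-tuple
 * @hal_soc_hdl: HAL SOC handle
 * @fst: Pointer to the Rx Flow Search Table
 * @flow_hash: Toeplitz hash of the flow
 * @flow_tuple_info: 5-tuple to match against the table entries
 * @flow_idx: return argument for the index of the matching entry
 *
 * Return: QDF_STATUS_SUCCESS if a matching entry was found,
 *         QDF_STATUS_E_RANGE otherwise
 */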
QDF_STATUS
hal_rx_find_flow_from_tuple(hal_soc_handle_t hal_soc_hdl,
			    struct hal_rx_fst *fst, uint32_t flow_hash,
			    void *flow_tuple_info, uint32_t *flow_idx)
{
	int i;
	void *hal_fse = NULL;
	uint32_t hal_hash = 0;
	struct hal_flow_tuple_info hal_tuple_info = { 0 };

	for (i = 0; i < fst->max_skid_length; i++) {
		hal_hash = hal_rx_get_hal_hash(fst, (flow_hash + i));

		hal_fse = hal_rx_flow_get_tuple_info(hal_soc_hdl, fst,
						     hal_hash,
						     &hal_tuple_info);
		if (!hal_fse)
			continue;

		/* Find the matching flow entry in HW FST */
		if (!qdf_mem_cmp(&hal_tuple_info,
				 flow_tuple_info,
				 sizeof(struct hal_flow_tuple_info))) {
			break;
		}
	}

	if (i == fst->max_skid_length) {
		dp_err("Max skid length reached for hash %u", flow_hash);
		return QDF_STATUS_E_RANGE;
	}

	*flow_idx = hal_hash;
	dp_info("flow_hash = %u, skid_entry = %d, flow_addr = %pK flow_idx = %d",
		flow_hash, i, hal_fse, *flow_idx);

	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hal_rx_find_flow_from_tuple);