1 /*
2 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for
6 * any purpose with or without fee is hereby granted, provided that the
7 * above copyright notice and this permission notice appear in all
8 * copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include "ce_api.h"
21 #include "ce_internal.h"
22 #include "ce_main.h"
23 #include "ce_reg.h"
24 #include "hif.h"
25 #include "hif_debug.h"
26 #include "hif_io32.h"
27 #include "qdf_lock.h"
28 #include "hif_main.h"
29 #include "hif_napi.h"
30 #include "qdf_module.h"
31 #include "regtable.h"
32
33 /*
34 * Support for Copy Engine hardware, which is mainly used for
35 * communication between Host and Target over a PCIe interconnect.
36 */
37
38 /*
39 * A single CopyEngine (CE) comprises two "rings":
40 * a source ring
41 * a destination ring
42 *
43 * Each ring consists of a number of descriptors which specify
44 * an address, length, and meta-data.
45 *
46 * Typically, one side of the PCIe interconnect (Host or Target)
47 * controls one ring and the other side controls the other ring.
48 * The source side chooses when to initiate a transfer and it
49 * chooses what to send (buffer address, length). The destination
50 * side keeps a supply of "anonymous receive buffers" available and
51 * it handles incoming data as it arrives (when the destination
52 * receives an interrupt).
53 *
54 * The sender may send a simple buffer (address/length) or it may
55 * send a small list of buffers. When a small list is sent, hardware
56 * "gathers" these and they end up in a single destination buffer
57 * with a single interrupt.
58 *
59 * There are several "contexts" managed by this layer -- more, it
60  * may seem, than should be needed. These are provided mainly for
61 * maximum flexibility and especially to facilitate a simpler HIF
62 * implementation. There are per-CopyEngine recv, send, and watermark
63 * contexts. These are supplied by the caller when a recv, send,
64 * or watermark handler is established and they are echoed back to
65 * the caller when the respective callbacks are invoked. There is
66 * also a per-transfer context supplied by the caller when a buffer
67 * (or sendlist) is sent and when a buffer is enqueued for recv.
68 * These per-transfer contexts are echoed back to the caller when
69 * the buffer is sent/received.
70  * Target TX hash result: toeplitz_hash_result
71 */
72
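/*
 * Illustrative flow (a sketch, not an additional API): for a fastpath TX,
 * the caller hands a single MSDU to ce_send_fast() below. The CE layer
 * builds two gathered source descriptors (HTC/HTT header + payload),
 * stores the MSDU as the per-transfer context, and echoes that context
 * back through the send-completion path so the caller can free or recycle
 * the buffer. The Rx side mirrors this in ce_per_engine_service_fast(),
 * where completed destination descriptors are batched and handed to the
 * registered fastpath handler.
 */
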
73 /* NB: Modeled after ce_completed_send_next */
74 /* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
75 #define CE_WM_SHFT 1
76
77 #ifdef WLAN_FEATURE_FASTPATH
78 #ifdef QCA_WIFI_3_0
79 static inline void
80 ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
81 uint64_t dma_addr,
82 uint32_t user_flags)
83 {
84 shadow_src_desc->buffer_addr_hi =
85 (uint32_t)((dma_addr >> 32) & CE_RING_BASE_ADDR_HIGH_MASK);
86 user_flags |= shadow_src_desc->buffer_addr_hi;
87 memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
88 sizeof(uint32_t));
89 }
90 #else
91 static inline void
92 ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
93 uint64_t dma_addr,
94 uint32_t user_flags)
95 {
96 }
97 #endif
98
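/*
 * Each fastpath TX consumes two source-ring slots: a gather descriptor for
 * the HTC/HTT header fragment and a second descriptor for the data payload
 * (see ce_send_fast() and ce_enqueue_desc() below).
 */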
99 #define SLOTS_PER_DATAPATH_TX 2
100
101 /**
102 * ce_send_fast() - CE layer Tx buffer posting function
103 * @copyeng: copy engine handle
104 * @msdu: msdu to be sent
105 * @transfer_id: transfer_id
106 * @download_len: packet download length
107 *
108  * Assumption: Called with an array of MSDUs
109  * Function:
110  * For each msdu in the array
111  * 1. Check the number of available entries
112  * 2. Create src ring entries (allocated in consistent memory)
113  * 3. Write the index to h/w
114 *
115 * Return: No. of packets that could be sent
116 */
117 int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
118 unsigned int transfer_id, uint32_t download_len)
119 {
120 struct CE_state *ce_state = (struct CE_state *)copyeng;
121 struct hif_softc *scn = ce_state->scn;
122 struct CE_ring_state *src_ring = ce_state->src_ring;
123 u_int32_t ctrl_addr = ce_state->ctrl_addr;
124 unsigned int nentries_mask = src_ring->nentries_mask;
125 unsigned int write_index;
126 unsigned int sw_index;
127 unsigned int frag_len;
128 uint64_t dma_addr;
129 uint32_t user_flags;
130 enum hif_ce_event_type type = FAST_TX_SOFTWARE_INDEX_UPDATE;
131 bool ok_to_send = true;
132
133 /*
134 * Create a log assuming the call will go through, and if not, we would
135 * add an error trace as well.
136 * Please add the same failure log for any additional error paths.
137 */
138 DPTRACE(qdf_dp_trace(msdu,
139 QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
140 QDF_TRACE_DEFAULT_PDEV_ID,
141 qdf_nbuf_data_addr(msdu),
142 sizeof(qdf_nbuf_data(msdu)), QDF_TX));
143
144 qdf_spin_lock_bh(&ce_state->ce_index_lock);
145
146 	/*
147 	 * Request a runtime PM resume if the bus has already suspended; if
148 	 * the resume is not granted, make sure there is no PCIe link access.
149 	 */
150 if (hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_CE) != 0)
151 ok_to_send = false;
152
153 if (ok_to_send) {
154 Q_TARGET_ACCESS_BEGIN(scn);
155 DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
156 }
157
158 write_index = src_ring->write_index;
159 sw_index = src_ring->sw_index;
160 hif_record_ce_desc_event(scn, ce_state->id,
161 FAST_TX_SOFTWARE_INDEX_UPDATE,
162 NULL, NULL, sw_index, 0);
163
164 if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
165 < SLOTS_PER_DATAPATH_TX)) {
166 hif_err_rl("Source ring full, required %d, available %d",
167 SLOTS_PER_DATAPATH_TX,
168 CE_RING_DELTA(nentries_mask, write_index,
169 sw_index - 1));
170 OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
171 if (ok_to_send)
172 Q_TARGET_ACCESS_END(scn);
173 qdf_spin_unlock_bh(&ce_state->ce_index_lock);
174
175 DPTRACE(qdf_dp_trace(NULL,
176 QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD,
177 QDF_TRACE_DEFAULT_PDEV_ID,
178 NULL, 0, QDF_TX));
179
180 return 0;
181 }
182
183 {
184 struct CE_src_desc *src_ring_base =
185 (struct CE_src_desc *)src_ring->base_addr_owner_space;
186 struct CE_src_desc *shadow_base =
187 (struct CE_src_desc *)src_ring->shadow_base;
188 struct CE_src_desc *src_desc =
189 CE_SRC_RING_TO_DESC(src_ring_base, write_index);
190 struct CE_src_desc *shadow_src_desc =
191 CE_SRC_RING_TO_DESC(shadow_base, write_index);
192
193 /*
194 * First fill out the ring descriptor for the HTC HTT frame
195 * header. These are uncached writes. Should we use a local
196 * structure instead?
197 */
198 		/* HTT/HTC header can be passed as an argument */
199 dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
200 shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
201 0xFFFFFFFF);
202 user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
203 ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
204 shadow_src_desc->meta_data = transfer_id;
205 shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);
206 ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
207 download_len -= shadow_src_desc->nbytes;
208 /*
209 * HTC HTT header is a word stream, so byte swap if CE byte
210 * swap enabled
211 */
212 shadow_src_desc->byte_swap = ((ce_state->attr_flags &
213 CE_ATTR_BYTE_SWAP_DATA) != 0);
214 		/* For the first (gather) descriptor, the write index does not need to be updated yet */
215 shadow_src_desc->gather = 1;
216 *src_desc = *shadow_src_desc;
217 /* By default we could initialize the transfer context to this
218 * value
219 */
220 src_ring->per_transfer_context[write_index] =
221 CE_SENDLIST_ITEM_CTXT;
222 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
223
224 src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
225 shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
226 /*
227 * Now fill out the ring descriptor for the actual data
228 * packet
229 */
230 dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
231 shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
232 0xFFFFFFFF);
233 /*
234 * Clear packet offset for all but the first CE desc.
235 */
236 user_flags &= ~CE_DESC_PKT_OFFSET_BIT_M;
237 ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
238 shadow_src_desc->meta_data = transfer_id;
239
240 /* get actual packet length */
241 frag_len = qdf_nbuf_get_frag_len(msdu, 1);
242
243 /* download remaining bytes of payload */
244 shadow_src_desc->nbytes = download_len;
245 ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
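		/*
		 * download_len was reduced by the header size above; clamp the
		 * remaining payload download length to the actual fragment
		 * length so the descriptor never claims more bytes than the
		 * fragment holds.
		 */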
246 if (shadow_src_desc->nbytes > frag_len)
247 shadow_src_desc->nbytes = frag_len;
248
249 /* Data packet is a byte stream, so disable byte swap */
250 shadow_src_desc->byte_swap = 0;
251 /* For the last one, gather is not set */
252 shadow_src_desc->gather = 0;
253 *src_desc = *shadow_src_desc;
254 src_ring->per_transfer_context[write_index] = msdu;
255
256 hif_record_ce_desc_event(scn, ce_state->id, type,
257 (union ce_desc *)src_desc,
258 src_ring->per_transfer_context[write_index],
259 write_index, shadow_src_desc->nbytes);
260
261 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
262
263 DPTRACE(qdf_dp_trace(msdu,
264 QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
265 QDF_TRACE_DEFAULT_PDEV_ID,
266 qdf_nbuf_data_addr(msdu),
267 sizeof(qdf_nbuf_data(msdu)), QDF_TX));
268 }
269
270 src_ring->write_index = write_index;
271
272 if (ok_to_send) {
273 if (qdf_likely(ce_state->state == CE_RUNNING)) {
274 type = FAST_TX_WRITE_INDEX_UPDATE;
275 war_ce_src_ring_write_idx_set(scn, ctrl_addr,
276 write_index);
277 Q_TARGET_ACCESS_END(scn);
278 } else {
279 ce_state->state = CE_PENDING;
280 }
281 hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_CE);
282 }
283
284 qdf_spin_unlock_bh(&ce_state->ce_index_lock);
285
286 /* sent 1 packet */
287 return 1;
288 }
289
290 /**
291 * ce_fastpath_rx_handle() - Updates write_index and calls fastpath msg handler
292 * @ce_state: handle to copy engine state
293 * @cmpl_msdus: Rx msdus
294 * @num_cmpls: number of Rx msdus
295 * @ctrl_addr: CE control address
296 *
297 * Return: None
298 */
299 static void ce_fastpath_rx_handle(struct CE_state *ce_state,
300 qdf_nbuf_t *cmpl_msdus, uint32_t num_cmpls,
301 uint32_t ctrl_addr)
302 {
303 struct hif_softc *scn = ce_state->scn;
304 struct CE_ring_state *dest_ring = ce_state->dest_ring;
305 uint32_t nentries_mask = dest_ring->nentries_mask;
306 uint32_t write_index;
307
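	/*
	 * Release ce_index_lock while the upper-layer fastpath handler runs;
	 * the potentially long message processing should not be done under
	 * the CE index lock. The lock is re-taken before the ring indices
	 * are updated below.
	 */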
308 qdf_spin_unlock(&ce_state->ce_index_lock);
309 ce_state->fastpath_handler(ce_state->context, cmpl_msdus, num_cmpls);
310 qdf_spin_lock(&ce_state->ce_index_lock);
311
312 /* Update Destination Ring Write Index */
313 write_index = dest_ring->write_index;
314 write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls);
315
316 hif_record_ce_desc_event(scn, ce_state->id,
317 FAST_RX_WRITE_INDEX_UPDATE,
318 NULL, NULL, write_index, 0);
319
320 CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
321 dest_ring->write_index = write_index;
322 }
323
324 /**
325 * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs
326 * @scn: hif_context
327 * @ce_id: Copy engine ID
328 * 1) Go through the CE ring, and find the completions
329  * 2) For valid completions retrieve context (nbuf) from per_transfer_context[]
330 * 3) Unmap buffer & accumulate in an array.
331 * 4) Call message handler when array is full or when exiting the handler
332 *
333 * Return: void
334 */
335
336 void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
337 {
338 struct CE_state *ce_state = scn->ce_id_to_state[ce_id];
339 struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
340 struct CE_ring_state *dest_ring = ce_state->dest_ring;
341 struct CE_dest_desc *dest_ring_base =
342 (struct CE_dest_desc *)dest_ring->base_addr_owner_space;
343
344 uint32_t nentries_mask = dest_ring->nentries_mask;
345 uint32_t sw_index = dest_ring->sw_index;
346 uint32_t nbytes;
347 qdf_nbuf_t nbuf;
348 dma_addr_t paddr;
349 struct CE_dest_desc *dest_desc;
350 qdf_nbuf_t cmpl_msdus[MSG_FLUSH_NUM];
351 uint32_t ctrl_addr = ce_state->ctrl_addr;
352 uint32_t nbuf_cmpl_idx = 0;
353 unsigned int more_comp_cnt = 0;
354 struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
355 struct ce_ops *ce_services = hif_state->ce_services;
356
357 more_data:
358 for (;;) {
359 dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base,
360 sw_index);
361
362 /*
363 * The following 2 reads are from non-cached memory
364 */
365 nbytes = dest_desc->nbytes;
366
367 /* If completion is invalid, break */
368 if (qdf_unlikely(nbytes == 0))
369 break;
370
371 /*
372 * Build the nbuf list from valid completions
373 */
374 nbuf = dest_ring->per_transfer_context[sw_index];
375
376 /*
377 * No lock is needed here, since this is the only thread
378 * that accesses the sw_index
379 */
380 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
381
382 /*
383 * CAREFUL : Uncached write, but still less expensive,
384 * since most modern caches use "write-combining" to
385 * flush multiple cache-writes all at once.
386 */
387 dest_desc->nbytes = 0;
388
389 		/*
390 		 * Per our understanding this is not required on our
391 		 * platform, since we are doing the same cache
392 		 * invalidation operation on the same buffer twice in
393 		 * succession, without any modification to this buffer
394 		 * by the CPU in between.
395 		 * However, this code with 2 syncs in succession has
396 		 * been undergoing some testing at a customer site,
397 		 * and has shown no problems so far. We would like to
398 		 * confirm with the customer that this sync is really
399 		 * not required before we remove this line
400 		 * completely.
401 		 */
402 paddr = QDF_NBUF_CB_PADDR(nbuf);
403
404 qdf_mem_dma_sync_single_for_cpu(scn->qdf_dev, paddr,
405 (skb_end_pointer(nbuf) -
406 (nbuf)->data),
407 DMA_FROM_DEVICE);
408
409 qdf_nbuf_put_tail(nbuf, nbytes);
410
411 qdf_assert_always(nbuf->data);
412
413 QDF_NBUF_CB_RX_CTX_ID(nbuf) =
414 hif_get_rx_ctx_id(ce_state->id, hif_hdl);
415 cmpl_msdus[nbuf_cmpl_idx++] = nbuf;
416
417 		/*
418 		 * We are not posting the buffers back; instead we
419 		 * are reusing the buffers.
420 		 */
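		/*
		 * Flush the accumulated completions to the upper layer once the
		 * batch threshold (ce_service_max_rx_ind_flush) is reached; any
		 * remaining partial batch is flushed after the loop.
		 */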
421 if (nbuf_cmpl_idx == scn->ce_service_max_rx_ind_flush) {
422 hif_record_ce_desc_event(scn, ce_state->id,
423 FAST_RX_SOFTWARE_INDEX_UPDATE,
424 NULL, NULL, sw_index, 0);
425 dest_ring->sw_index = sw_index;
426 ce_fastpath_rx_handle(ce_state, cmpl_msdus,
427 nbuf_cmpl_idx, ctrl_addr);
428
429 ce_state->receive_count += nbuf_cmpl_idx;
430 if (qdf_unlikely(hif_ce_service_should_yield(
431 scn, ce_state))) {
432 ce_state->force_break = 1;
433 qdf_atomic_set(&ce_state->rx_pending, 1);
434 return;
435 }
436
437 nbuf_cmpl_idx = 0;
438 more_comp_cnt = 0;
439 }
440 }
441
442 hif_record_ce_desc_event(scn, ce_state->id,
443 FAST_RX_SOFTWARE_INDEX_UPDATE,
444 NULL, NULL, sw_index, 0);
445
446 dest_ring->sw_index = sw_index;
447
448 /*
449 * If there are not enough completions to fill the array,
450 * just call the message handler here
451 */
452 if (nbuf_cmpl_idx) {
453 ce_fastpath_rx_handle(ce_state, cmpl_msdus,
454 nbuf_cmpl_idx, ctrl_addr);
455
456 ce_state->receive_count += nbuf_cmpl_idx;
457 if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
458 ce_state->force_break = 1;
459 qdf_atomic_set(&ce_state->rx_pending, 1);
460 return;
461 }
462
463 /* check for more packets after upper layer processing */
464 nbuf_cmpl_idx = 0;
465 more_comp_cnt = 0;
466 goto more_data;
467 }
468
469 hif_update_napi_max_poll_time(ce_state, ce_id, qdf_get_cpu());
470
471 qdf_atomic_set(&ce_state->rx_pending, 0);
472 if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
473 if (!ce_state->msi_supported)
474 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
475 HOST_IS_COPY_COMPLETE_MASK);
476 } else {
477 hif_err_rl("Target access is not allowed");
478 return;
479 }
480
481 if (ce_services->ce_recv_entries_done_nolock(scn, ce_state)) {
482 if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
483 goto more_data;
484 } else {
485 hif_err("Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
486 nentries_mask,
487 ce_state->dest_ring->sw_index,
488 CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr));
489 }
490 }
491 #ifdef NAPI_YIELD_BUDGET_BASED
492 	/*
493 	 * Caution: before you modify this code, please refer to the
494 	 * hif_napi_poll function to understand how napi_complete gets
495 	 * called and make the necessary changes. Force break has to be
496 	 * done until WIN disables the interrupt at the source.
497 	 */
498 ce_state->force_break = 1;
499 #endif
500 }
501
502 /**
503 * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
504 * @scn: Handle to HIF context
505 *
506 * Return: true if fastpath is enabled else false.
507 */
508 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
509 {
510 return scn->fastpath_mode_on;
511 }
512 #else
513 void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
514 {
515 }
516
517 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
518 {
519 return false;
520 }
521 #endif /* WLAN_FEATURE_FASTPATH */
522
523 static QDF_STATUS
524 ce_send_nolock_legacy(struct CE_handle *copyeng,
525 void *per_transfer_context,
526 qdf_dma_addr_t buffer,
527 uint32_t nbytes,
528 uint32_t transfer_id,
529 uint32_t flags,
530 uint32_t user_flags)
531 {
532 QDF_STATUS status;
533 struct CE_state *CE_state = (struct CE_state *)copyeng;
534 struct CE_ring_state *src_ring = CE_state->src_ring;
535 uint32_t ctrl_addr = CE_state->ctrl_addr;
536 unsigned int nentries_mask = src_ring->nentries_mask;
537 unsigned int sw_index = src_ring->sw_index;
538 unsigned int write_index = src_ring->write_index;
539 uint64_t dma_addr = buffer;
540 struct hif_softc *scn = CE_state->scn;
541
542 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
543 return QDF_STATUS_E_FAILURE;
544 if (unlikely(CE_RING_DELTA(nentries_mask,
545 write_index, sw_index - 1) <= 0)) {
546 OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
547 Q_TARGET_ACCESS_END(scn);
548 return QDF_STATUS_E_FAILURE;
549 }
550 {
551 enum hif_ce_event_type event_type;
552 struct CE_src_desc *src_ring_base =
553 (struct CE_src_desc *)src_ring->base_addr_owner_space;
554 struct CE_src_desc *shadow_base =
555 (struct CE_src_desc *)src_ring->shadow_base;
556 struct CE_src_desc *src_desc =
557 CE_SRC_RING_TO_DESC(src_ring_base, write_index);
558 struct CE_src_desc *shadow_src_desc =
559 CE_SRC_RING_TO_DESC(shadow_base, write_index);
560
561 /* Update low 32 bits source descriptor address */
562 shadow_src_desc->buffer_addr =
563 (uint32_t)(dma_addr & 0xFFFFFFFF);
564
565 #ifdef QCA_WIFI_3_0
566 shadow_src_desc->buffer_addr_hi =
567 (uint32_t)((dma_addr >> 32) &
568 CE_RING_BASE_ADDR_HIGH_MASK);
569 user_flags |= shadow_src_desc->buffer_addr_hi;
570 memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
571 sizeof(uint32_t));
572 #endif
573 shadow_src_desc->target_int_disable = 0;
574 shadow_src_desc->host_int_disable = 0;
575
576 shadow_src_desc->meta_data = transfer_id;
577
578 /*
579 * Set the swap bit if:
580 * typical sends on this CE are swapped (host is big-endian)
581 * and this send doesn't disable the swapping
582 * (data is not bytestream)
583 */
584 shadow_src_desc->byte_swap =
585 (((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
586 != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
587 shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
588 shadow_src_desc->nbytes = nbytes;
589 ce_validate_nbytes(nbytes, CE_state);
590
591 *src_desc = *shadow_src_desc;
592
593 src_ring->per_transfer_context[write_index] =
594 per_transfer_context;
595
596 /* Update Source Ring Write Index */
597 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
598
599 /* WORKAROUND */
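		/*
		 * Only the final (non-gather) descriptor of a transfer pushes the
		 * write index to hardware; gather descriptors are only recorded as
		 * software posts. If the CE is not running, defer the hardware
		 * write index update by marking the CE as pending.
		 */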
600 if (shadow_src_desc->gather) {
601 event_type = HIF_TX_GATHER_DESC_POST;
602 } else if (qdf_unlikely(CE_state->state != CE_RUNNING)) {
603 event_type = HIF_TX_DESC_SOFTWARE_POST;
604 CE_state->state = CE_PENDING;
605 } else {
606 event_type = HIF_TX_DESC_POST;
607 war_ce_src_ring_write_idx_set(scn, ctrl_addr,
608 write_index);
609 }
610
611 		/* src_ring->write_index hasn't been updated yet, even though
612 		 * the register has already been written to.
613 		 */
614 hif_record_ce_desc_event(scn, CE_state->id, event_type,
615 (union ce_desc *)shadow_src_desc, per_transfer_context,
616 src_ring->write_index, nbytes);
617
618 src_ring->write_index = write_index;
619 status = QDF_STATUS_SUCCESS;
620 }
621 Q_TARGET_ACCESS_END(scn);
622 return status;
623 }
624
625 static QDF_STATUS
626 ce_sendlist_send_legacy(struct CE_handle *copyeng,
627 void *per_transfer_context,
628 struct ce_sendlist *sendlist, unsigned int transfer_id)
629 {
630 QDF_STATUS status = QDF_STATUS_E_NOMEM;
631 struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
632 struct CE_state *CE_state = (struct CE_state *)copyeng;
633 struct CE_ring_state *src_ring = CE_state->src_ring;
634 unsigned int nentries_mask = src_ring->nentries_mask;
635 unsigned int num_items = sl->num_items;
636 unsigned int sw_index;
637 unsigned int write_index;
638 struct hif_softc *scn = CE_state->scn;
639
640 QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));
641
642 qdf_spin_lock_bh(&CE_state->ce_index_lock);
643
644 if (CE_state->scn->fastpath_mode_on && CE_state->htt_tx_data &&
645 Q_TARGET_ACCESS_BEGIN(scn) == 0) {
646 src_ring->sw_index = CE_SRC_RING_READ_IDX_GET_FROM_DDR(
647 scn, CE_state->ctrl_addr);
648 Q_TARGET_ACCESS_END(scn);
649 }
650
651 sw_index = src_ring->sw_index;
652 write_index = src_ring->write_index;
653
654 if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
655 num_items) {
656 struct ce_sendlist_item *item;
657 int i;
658
659 /* handle all but the last item uniformly */
660 for (i = 0; i < num_items - 1; i++) {
661 item = &sl->item[i];
662 /* TBDXXX: Support extensible sendlist_types? */
663 QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
664 status = ce_send_nolock_legacy(copyeng,
665 CE_SENDLIST_ITEM_CTXT,
666 (qdf_dma_addr_t)item->data,
667 item->u.nbytes, transfer_id,
668 item->flags | CE_SEND_FLAG_GATHER,
669 item->user_flags);
670 QDF_ASSERT(status == QDF_STATUS_SUCCESS);
671 }
672 /* provide valid context pointer for final item */
673 item = &sl->item[i];
674 /* TBDXXX: Support extensible sendlist_types? */
675 QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
676 status = ce_send_nolock_legacy(copyeng, per_transfer_context,
677 (qdf_dma_addr_t) item->data,
678 item->u.nbytes,
679 transfer_id, item->flags,
680 item->user_flags);
681 QDF_ASSERT(status == QDF_STATUS_SUCCESS);
682 QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
683 QDF_NBUF_TX_PKT_CE);
684 DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
685 QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
686 QDF_TRACE_DEFAULT_PDEV_ID,
687 (uint8_t *)&(((qdf_nbuf_t)per_transfer_context)->data),
688 sizeof(((qdf_nbuf_t)per_transfer_context)->data),
689 QDF_TX));
690 } else {
691 /*
692 * Probably not worth the additional complexity to support
693 * partial sends with continuation or notification. We expect
694 * to use large rings and small sendlists. If we can't handle
695 * the entire request at once, punt it back to the caller.
696 */
697 }
698 qdf_spin_unlock_bh(&CE_state->ce_index_lock);
699
700 return status;
701 }
702
703 /**
704 * ce_recv_buf_enqueue_legacy() - enqueue a recv buffer into a copy engine
705 * @copyeng: copy engine handle
706 * @per_recv_context: virtual address of the nbuf
707 * @buffer: physical address of the nbuf
708 *
709 * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
710 */
711 static QDF_STATUS
712 ce_recv_buf_enqueue_legacy(struct CE_handle *copyeng,
713 void *per_recv_context, qdf_dma_addr_t buffer)
714 {
715 QDF_STATUS status;
716 struct CE_state *CE_state = (struct CE_state *)copyeng;
717 struct CE_ring_state *dest_ring = CE_state->dest_ring;
718 uint32_t ctrl_addr = CE_state->ctrl_addr;
719 unsigned int nentries_mask = dest_ring->nentries_mask;
720 unsigned int write_index;
721 unsigned int sw_index;
722 uint64_t dma_addr = buffer;
723 struct hif_softc *scn = CE_state->scn;
724
725 qdf_spin_lock_bh(&CE_state->ce_index_lock);
726 write_index = dest_ring->write_index;
727 sw_index = dest_ring->sw_index;
728
729 if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
730 qdf_spin_unlock_bh(&CE_state->ce_index_lock);
731 return QDF_STATUS_E_IO;
732 }
733
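	/*
	 * Fastpath HTT Rx rings recycle their buffers in place (see
	 * ce_per_engine_service_fast()), so the ring-delta check is relaxed
	 * for them; the hardware write index is still only pushed when it
	 * differs from sw_index.
	 */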
734 if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
735 (ce_is_fastpath_enabled(scn) && CE_state->htt_rx_data)) {
736 struct CE_dest_desc *dest_ring_base =
737 (struct CE_dest_desc *)dest_ring->base_addr_owner_space;
738 struct CE_dest_desc *dest_desc =
739 CE_DEST_RING_TO_DESC(dest_ring_base, write_index);
740
741 /* Update low 32 bit destination descriptor */
742 dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
743 #ifdef QCA_WIFI_3_0
744 dest_desc->buffer_addr_hi =
745 (uint32_t)((dma_addr >> 32) &
746 CE_RING_BASE_ADDR_HIGH_MASK);
747 #endif
748 dest_desc->nbytes = 0;
749
750 dest_ring->per_transfer_context[write_index] =
751 per_recv_context;
752
753 hif_record_ce_desc_event(scn, CE_state->id,
754 HIF_RX_DESC_POST,
755 (union ce_desc *)dest_desc,
756 per_recv_context,
757 write_index, 0);
758
759 /* Update Destination Ring Write Index */
760 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
761 if (write_index != sw_index) {
762 CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
763 dest_ring->write_index = write_index;
764 }
765 status = QDF_STATUS_SUCCESS;
766 } else
767 status = QDF_STATUS_E_FAILURE;
768
769 Q_TARGET_ACCESS_END(scn);
770 qdf_spin_unlock_bh(&CE_state->ce_index_lock);
771 return status;
772 }
773
774 static unsigned int
775 ce_send_entries_done_nolock_legacy(struct hif_softc *scn,
776 struct CE_state *CE_state)
777 {
778 struct CE_ring_state *src_ring = CE_state->src_ring;
779 uint32_t ctrl_addr = CE_state->ctrl_addr;
780 unsigned int nentries_mask = src_ring->nentries_mask;
781 unsigned int sw_index;
782 unsigned int read_index;
783
784 sw_index = src_ring->sw_index;
785 read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
786
787 return CE_RING_DELTA(nentries_mask, sw_index, read_index);
788 }
789
790 static unsigned int
791 ce_recv_entries_done_nolock_legacy(struct hif_softc *scn,
792 struct CE_state *CE_state)
793 {
794 struct CE_ring_state *dest_ring = CE_state->dest_ring;
795 uint32_t ctrl_addr = CE_state->ctrl_addr;
796 unsigned int nentries_mask = dest_ring->nentries_mask;
797 unsigned int sw_index;
798 unsigned int read_index;
799
800 sw_index = dest_ring->sw_index;
801 read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
802
803 return CE_RING_DELTA(nentries_mask, sw_index, read_index);
804 }
805
806 static QDF_STATUS
807 ce_completed_recv_next_nolock_legacy(struct CE_state *CE_state,
808 void **per_CE_contextp,
809 void **per_transfer_contextp,
810 qdf_dma_addr_t *bufferp,
811 unsigned int *nbytesp,
812 unsigned int *transfer_idp,
813 unsigned int *flagsp)
814 {
815 QDF_STATUS status;
816 struct CE_ring_state *dest_ring = CE_state->dest_ring;
817 unsigned int nentries_mask = dest_ring->nentries_mask;
818 unsigned int sw_index = dest_ring->sw_index;
819 struct hif_softc *scn = CE_state->scn;
820 struct CE_dest_desc *dest_ring_base =
821 (struct CE_dest_desc *)dest_ring->base_addr_owner_space;
822 struct CE_dest_desc *dest_desc =
823 CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
824 int nbytes;
825 struct CE_dest_desc dest_desc_info;
826 	/*
827 	 * By copying the dest_desc_info element to local memory, we
828 	 * avoid an extra read from non-cacheable memory.
829 	 */
830 dest_desc_info = *dest_desc;
831 nbytes = dest_desc_info.nbytes;
832 if (nbytes == 0) {
833 /*
834 * This closes a relatively unusual race where the Host
835 * sees the updated DRRI before the update to the
836 * corresponding descriptor has completed. We treat this
837 * as a descriptor that is not yet done.
838 */
839 status = QDF_STATUS_E_FAILURE;
840 goto done;
841 }
842
843 hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
844 (union ce_desc *)dest_desc,
845 dest_ring->per_transfer_context[sw_index],
846 sw_index, 0);
847
848 dest_desc->nbytes = 0;
849
850 /* Return data from completed destination descriptor */
851 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
852 *nbytesp = nbytes;
853 *transfer_idp = dest_desc_info.meta_data;
854 *flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;
855
856 if (per_CE_contextp)
857 *per_CE_contextp = CE_state->recv_context;
858
859 if (per_transfer_contextp) {
860 *per_transfer_contextp =
861 dest_ring->per_transfer_context[sw_index];
862 }
863 dest_ring->per_transfer_context[sw_index] = 0; /* sanity */
864
865 /* Update sw_index */
866 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
867 dest_ring->sw_index = sw_index;
868 status = QDF_STATUS_SUCCESS;
869
870 done:
871 return status;
872 }
873
874 /* NB: Modeled after ce_completed_recv_next_nolock */
875 static QDF_STATUS
876 ce_revoke_recv_next_legacy(struct CE_handle *copyeng,
877 void **per_CE_contextp,
878 void **per_transfer_contextp,
879 qdf_dma_addr_t *bufferp)
880 {
881 struct CE_state *CE_state;
882 struct CE_ring_state *dest_ring;
883 unsigned int nentries_mask;
884 unsigned int sw_index;
885 unsigned int write_index;
886 QDF_STATUS status;
887 struct hif_softc *scn;
888
889 CE_state = (struct CE_state *)copyeng;
890 dest_ring = CE_state->dest_ring;
891 if (!dest_ring)
892 return QDF_STATUS_E_FAILURE;
893
894 scn = CE_state->scn;
895 qdf_spin_lock(&CE_state->ce_index_lock);
896 nentries_mask = dest_ring->nentries_mask;
897 sw_index = dest_ring->sw_index;
898 write_index = dest_ring->write_index;
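	/*
	 * Any descriptor between sw_index and write_index is still owned by
	 * software (enqueued but never completed); hand the oldest one back
	 * to the caller so its buffer can be reclaimed, typically during
	 * shutdown.
	 */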
899 if (write_index != sw_index) {
900 struct CE_dest_desc *dest_ring_base =
901 (struct CE_dest_desc *)dest_ring->
902 base_addr_owner_space;
903 struct CE_dest_desc *dest_desc =
904 CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
905
906 /* Return data from completed destination descriptor */
907 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);
908
909 if (per_CE_contextp)
910 *per_CE_contextp = CE_state->recv_context;
911
912 if (per_transfer_contextp) {
913 *per_transfer_contextp =
914 dest_ring->per_transfer_context[sw_index];
915 }
916 dest_ring->per_transfer_context[sw_index] = 0; /* sanity */
917
918 /* Update sw_index */
919 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
920 dest_ring->sw_index = sw_index;
921 status = QDF_STATUS_SUCCESS;
922 } else {
923 status = QDF_STATUS_E_FAILURE;
924 }
925 qdf_spin_unlock(&CE_state->ce_index_lock);
926
927 return status;
928 }
929
930 /*
931 * Guts of ce_completed_send_next.
932 * The caller takes responsibility for any necessary locking.
933 */
934 static QDF_STATUS
935 ce_completed_send_next_nolock_legacy(struct CE_state *CE_state,
936 void **per_CE_contextp,
937 void **per_transfer_contextp,
938 qdf_dma_addr_t *bufferp,
939 unsigned int *nbytesp,
940 unsigned int *transfer_idp,
941 unsigned int *sw_idx,
942 unsigned int *hw_idx,
943 uint32_t *toeplitz_hash_result)
944 {
945 QDF_STATUS status = QDF_STATUS_E_FAILURE;
946 struct CE_ring_state *src_ring = CE_state->src_ring;
947 uint32_t ctrl_addr = CE_state->ctrl_addr;
948 unsigned int nentries_mask = src_ring->nentries_mask;
949 unsigned int sw_index = src_ring->sw_index;
950 unsigned int read_index;
951 struct hif_softc *scn = CE_state->scn;
952
953 if (src_ring->hw_index == sw_index) {
954 /*
955 * The SW completion index has caught up with the cached
956 * version of the HW completion index.
957 * Update the cached HW completion index to see whether
958 * the SW has really caught up to the HW, or if the cached
959 * value of the HW index has become stale.
960 */
961 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
962 return QDF_STATUS_E_FAILURE;
963 src_ring->hw_index =
964 CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
965 if (Q_TARGET_ACCESS_END(scn) < 0)
966 return QDF_STATUS_E_FAILURE;
967 }
968 read_index = src_ring->hw_index;
969
970 if (sw_idx)
971 *sw_idx = sw_index;
972
973 if (hw_idx)
974 *hw_idx = read_index;
975
976 if ((read_index != sw_index) && (read_index != 0xffffffff)) {
977 struct CE_src_desc *shadow_base =
978 (struct CE_src_desc *)src_ring->shadow_base;
979 struct CE_src_desc *shadow_src_desc =
980 CE_SRC_RING_TO_DESC(shadow_base, sw_index);
981 #ifdef QCA_WIFI_3_0
982 struct CE_src_desc *src_ring_base =
983 (struct CE_src_desc *)src_ring->base_addr_owner_space;
984 struct CE_src_desc *src_desc =
985 CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
986 #endif
987 hif_record_ce_desc_event(scn, CE_state->id,
988 HIF_TX_DESC_COMPLETION,
989 (union ce_desc *)shadow_src_desc,
990 src_ring->per_transfer_context[sw_index],
991 sw_index, shadow_src_desc->nbytes);
992
993 /* Return data from completed source descriptor */
994 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
995 *nbytesp = shadow_src_desc->nbytes;
996 *transfer_idp = shadow_src_desc->meta_data;
997 #ifdef QCA_WIFI_3_0
998 *toeplitz_hash_result = src_desc->toeplitz_hash_result;
999 #else
1000 *toeplitz_hash_result = 0;
1001 #endif
1002 if (per_CE_contextp)
1003 *per_CE_contextp = CE_state->send_context;
1004
1005 if (per_transfer_contextp) {
1006 *per_transfer_contextp =
1007 src_ring->per_transfer_context[sw_index];
1008 }
1009 src_ring->per_transfer_context[sw_index] = 0; /* sanity */
1010
1011 /* Update sw_index */
1012 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1013 src_ring->sw_index = sw_index;
1014 status = QDF_STATUS_SUCCESS;
1015 }
1016
1017 return status;
1018 }
1019
1020 static QDF_STATUS
1021 ce_cancel_send_next_legacy(struct CE_handle *copyeng,
1022 void **per_CE_contextp,
1023 void **per_transfer_contextp,
1024 qdf_dma_addr_t *bufferp,
1025 unsigned int *nbytesp,
1026 unsigned int *transfer_idp,
1027 uint32_t *toeplitz_hash_result)
1028 {
1029 struct CE_state *CE_state;
1030 struct CE_ring_state *src_ring;
1031 unsigned int nentries_mask;
1032 unsigned int sw_index;
1033 unsigned int write_index;
1034 QDF_STATUS status;
1035 struct hif_softc *scn;
1036
1037 CE_state = (struct CE_state *)copyeng;
1038 src_ring = CE_state->src_ring;
1039 if (!src_ring)
1040 return QDF_STATUS_E_FAILURE;
1041
1042 scn = CE_state->scn;
1043 qdf_spin_lock(&CE_state->ce_index_lock);
1044 nentries_mask = src_ring->nentries_mask;
1045 sw_index = src_ring->sw_index;
1046 write_index = src_ring->write_index;
1047
1048 if (write_index != sw_index) {
1049 struct CE_src_desc *src_ring_base =
1050 (struct CE_src_desc *)src_ring->base_addr_owner_space;
1051 struct CE_src_desc *src_desc =
1052 CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
1053
1054 /* Return data from completed source descriptor */
1055 *bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
1056 *nbytesp = src_desc->nbytes;
1057 *transfer_idp = src_desc->meta_data;
1058 #ifdef QCA_WIFI_3_0
1059 *toeplitz_hash_result = src_desc->toeplitz_hash_result;
1060 #else
1061 *toeplitz_hash_result = 0;
1062 #endif
1063
1064 if (per_CE_contextp)
1065 *per_CE_contextp = CE_state->send_context;
1066
1067 if (per_transfer_contextp) {
1068 *per_transfer_contextp =
1069 src_ring->per_transfer_context[sw_index];
1070 }
1071 src_ring->per_transfer_context[sw_index] = 0; /* sanity */
1072
1073 /* Update sw_index */
1074 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1075 src_ring->sw_index = sw_index;
1076 status = QDF_STATUS_SUCCESS;
1077 } else {
1078 status = QDF_STATUS_E_FAILURE;
1079 }
1080 qdf_spin_unlock(&CE_state->ce_index_lock);
1081
1082 return status;
1083 }
1084
1085 /*
1086 * Adjust interrupts for the copy complete handler.
1087 * If it's needed for either send or recv, then unmask
1088 * this interrupt; otherwise, mask it.
1089 *
1090 * Called with target_lock held.
1091 */
1092 static void
1093 ce_per_engine_handler_adjust_legacy(struct CE_state *CE_state,
1094 int disable_copy_compl_intr)
1095 {
1096 uint32_t ctrl_addr = CE_state->ctrl_addr;
1097 struct hif_softc *scn = CE_state->scn;
1098
1099 CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
1100
1101 if (CE_state->msi_supported)
1102 return;
1103
1104 if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1105 return;
1106
1107 if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
1108 hif_err_rl("Target access is not allowed");
1109 return;
1110 }
1111
1112 if ((!disable_copy_compl_intr) &&
1113 (CE_state->send_cb || CE_state->recv_cb))
1114 CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1115 else
1116 CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1117
1118 if (CE_state->watermark_cb)
1119 CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1120 else
1121 CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1122 Q_TARGET_ACCESS_END(scn);
1123 }
1124
1125 #ifdef QCA_WIFI_WCN6450
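/**
 * ce_enqueue_desc() - fill source ring descriptors for a fastpath MSDU
 * @copyeng: copy engine handle
 * @msdu: msdu to be sent
 * @transfer_id: transfer_id
 * @download_len: packet download length
 *
 * Like ce_send_fast(), but only enqueues the two gathered descriptors
 * (HTC/HTT header + payload); the hardware write index is not pushed here.
 *
 * Return: 0 on success, -ENOSPC if the source ring is full
 */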
1126 int ce_enqueue_desc(struct CE_handle *copyeng, qdf_nbuf_t msdu,
1127 unsigned int transfer_id, uint32_t download_len)
1128 {
1129 struct CE_state *ce_state = (struct CE_state *)copyeng;
1130 struct hif_softc *scn = ce_state->scn;
1131 struct CE_ring_state *src_ring = ce_state->src_ring;
1132 u_int32_t ctrl_addr = ce_state->ctrl_addr;
1133 unsigned int nentries_mask = src_ring->nentries_mask;
1134 unsigned int write_index;
1135 unsigned int sw_index;
1136 unsigned int frag_len;
1137 uint64_t dma_addr;
1138 uint32_t user_flags;
1139 enum hif_ce_event_type type = FAST_TX_SOFTWARE_INDEX_UPDATE;
1140
1141 /*
1142 * Create a log assuming the call will go through, and if not, we would
1143 * add an error trace as well.
1144 * Please add the same failure log for any additional error paths.
1145 */
1146 DPTRACE(qdf_dp_trace(msdu,
1147 QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
1148 QDF_TRACE_DEFAULT_PDEV_ID,
1149 qdf_nbuf_data_addr(msdu),
1150 sizeof(qdf_nbuf_data(msdu)), QDF_TX));
1151
1152 DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
1153
1154 write_index = src_ring->write_index;
1155 sw_index = src_ring->sw_index;
1156 hif_record_ce_desc_event(scn, ce_state->id,
1157 FAST_TX_SOFTWARE_INDEX_UPDATE,
1158 NULL, NULL, sw_index, 0);
1159
1160 if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
1161 < SLOTS_PER_DATAPATH_TX)) {
1162 hif_err_rl("Source ring full, required %d, available %d",
1163 SLOTS_PER_DATAPATH_TX,
1164 CE_RING_DELTA(nentries_mask, write_index,
1165 sw_index - 1));
1166 OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
1167
1168 DPTRACE(qdf_dp_trace(NULL,
1169 QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD,
1170 QDF_TRACE_DEFAULT_PDEV_ID,
1171 NULL, 0, QDF_TX));
1172
1173 return -ENOSPC;
1174 }
1175
1176 {
1177 struct CE_src_desc *src_ring_base =
1178 (struct CE_src_desc *)src_ring->base_addr_owner_space;
1179 struct CE_src_desc *shadow_base =
1180 (struct CE_src_desc *)src_ring->shadow_base;
1181 struct CE_src_desc *src_desc =
1182 CE_SRC_RING_TO_DESC(src_ring_base, write_index);
1183 struct CE_src_desc *shadow_src_desc =
1184 CE_SRC_RING_TO_DESC(shadow_base, write_index);
1185
1186 /*
1187 * First fill out the ring descriptor for the HTC HTT frame
1188 * header. These are uncached writes. Should we use a local
1189 * structure instead?
1190 */
1191 		/* HTT/HTC header can be passed as an argument */
1192 dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
1193 shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
1194 0xFFFFFFFF);
1195 user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
1196 ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
1197 shadow_src_desc->meta_data = transfer_id;
1198 shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);
1199 ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
1200 download_len -= shadow_src_desc->nbytes;
1201 /*
1202 * HTC HTT header is a word stream, so byte swap if CE byte
1203 * swap enabled
1204 */
1205 shadow_src_desc->byte_swap = ((ce_state->attr_flags &
1206 CE_ATTR_BYTE_SWAP_DATA) != 0);
1207 		/* For the first (gather) descriptor, the write index does not need to be updated yet */
1208 shadow_src_desc->gather = 1;
1209 *src_desc = *shadow_src_desc;
1210 /* By default we could initialize the transfer context to this
1211 * value
1212 */
1213 src_ring->per_transfer_context[write_index] =
1214 CE_SENDLIST_ITEM_CTXT;
1215 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
1216
1217 src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
1218 shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
1219 /*
1220 * Now fill out the ring descriptor for the actual data
1221 * packet
1222 */
1223 dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
1224 shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
1225 0xFFFFFFFF);
1226 /*
1227 * Clear packet offset for all but the first CE desc.
1228 */
1229 user_flags &= ~CE_DESC_PKT_OFFSET_BIT_M;
1230 ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
1231 shadow_src_desc->meta_data = transfer_id;
1232
1233 /* get actual packet length */
1234 frag_len = qdf_nbuf_get_frag_len(msdu, 1);
1235
1236 /* download remaining bytes of payload */
1237 shadow_src_desc->nbytes = download_len;
1238 ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
1239 if (shadow_src_desc->nbytes > frag_len)
1240 shadow_src_desc->nbytes = frag_len;
1241
1242 /* Data packet is a byte stream, so disable byte swap */
1243 shadow_src_desc->byte_swap = 0;
1244 /* For the last one, gather is not set */
1245 shadow_src_desc->gather = 0;
1246 *src_desc = *shadow_src_desc;
1247 src_ring->per_transfer_context[write_index] = msdu;
1248
1249 hif_record_ce_desc_event(scn, ce_state->id, type,
1250 (union ce_desc *)src_desc,
1251 src_ring->per_transfer_context[write_index],
1252 write_index, shadow_src_desc->nbytes);
1253
1254 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
1255
1256 DPTRACE(qdf_dp_trace(msdu,
1257 QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
1258 QDF_TRACE_DEFAULT_PDEV_ID,
1259 qdf_nbuf_data_addr(msdu),
1260 sizeof(qdf_nbuf_data(msdu)), QDF_TX));
1261 }
1262
1263 src_ring->write_index = write_index;
1264
1265 return 0;
1266 }
1267
1268 static void ce_legacy_msi_param_setup(struct hif_softc *scn, uint32_t ctrl_addr,
1269 uint32_t ce_id, struct CE_attr *attr)
1270 {
1271 uint32_t addr_low;
1272 uint32_t addr_high;
1273 uint32_t msi_data_start;
1274 uint32_t msi_data_count;
1275 uint32_t msi_irq_start;
1276 uint32_t tmp;
1277 int ret;
1278 int irq_id;
1279
1280 ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
1281 &msi_data_count, &msi_data_start,
1282 &msi_irq_start);
1283
1284 /* msi config not found */
1285 if (ret) {
1286 hif_debug("Failed to get user msi assignment ret %d", ret);
1287 return;
1288 }
1289
1290 irq_id = scn->int_assignment->msi_idx[ce_id];
1291 pld_get_msi_address(scn->qdf_dev->dev, &addr_low, &addr_high);
1292
1293 CE_MSI_ADDR_LOW_SET(scn, ctrl_addr, addr_low);
1294 tmp = CE_MSI_ADDR_HIGH_GET(scn, ctrl_addr);
1295 tmp &= ~CE_RING_BASE_ADDR_HIGH_MASK;
1296 tmp |= (addr_high & CE_RING_BASE_ADDR_HIGH_MASK);
1297 CE_MSI_ADDR_HIGH_SET(scn, ctrl_addr, tmp);
1298 CE_MSI_DATA_SET(scn, ctrl_addr, irq_id + msi_data_start);
1299 CE_MSI_EN_SET(scn, ctrl_addr);
1300 }
1301
1302 static void ce_legacy_src_intr_thres_setup(struct hif_softc *scn,
1303 uint32_t ctrl_addr,
1304 struct CE_attr *attr,
1305 uint32_t timer_thrs,
1306 uint32_t count_thrs)
1307 {
1308 uint32_t tmp;
1309
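	/*
	 * Program the batch interrupt moderation thresholds. The assumption
	 * here is that the hardware raises the source-ring interrupt once
	 * either the batched descriptor count or the batch timer crosses the
	 * programmed threshold; a zero threshold leaves the corresponding
	 * field untouched.
	 */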
1310 tmp = CE_CHANNEL_SRC_BATCH_TIMER_INT_SETUP_GET(scn, ctrl_addr);
1311
1312 if (count_thrs) {
1313 tmp &= ~CE_SRC_BATCH_COUNTER_THRESH_MASK;
1314 tmp |= ((count_thrs << CE_SRC_BATCH_COUNTER_THRESH_LSB) &
1315 CE_SRC_BATCH_COUNTER_THRESH_MASK);
1316 }
1317
1318 if (timer_thrs) {
1319 tmp &= ~CE_SRC_BATCH_TIMER_THRESH_MASK;
1320 tmp |= ((timer_thrs << CE_SRC_BATCH_TIMER_THRESH_LSB) &
1321 CE_SRC_BATCH_TIMER_THRESH_MASK);
1322 }
1323
1324 CE_CHANNEL_SRC_BATCH_TIMER_INT_SETUP(scn, ctrl_addr, tmp);
1325 CE_CHANNEL_SRC_TIMER_BATCH_INT_EN(scn, ctrl_addr);
1326 }
1327
1328 static void ce_legacy_dest_intr_thres_setup(struct hif_softc *scn,
1329 uint32_t ctrl_addr,
1330 struct CE_attr *attr,
1331 uint32_t timer_thrs,
1332 uint32_t count_thrs)
1333 {
1334 uint32_t tmp;
1335
1336 tmp = CE_CHANNEL_DST_BATCH_TIMER_INT_SETUP_GET(scn, ctrl_addr);
1337
1338 if (count_thrs) {
1339 tmp &= ~CE_DST_BATCH_COUNTER_THRESH_MASK;
1340 tmp |= ((count_thrs << CE_DST_BATCH_COUNTER_THRESH_LSB) &
1341 CE_DST_BATCH_COUNTER_THRESH_MASK);
1342 }
1343
1344 if (timer_thrs) {
1345 tmp &= ~CE_DST_BATCH_TIMER_THRESH_MASK;
1346 tmp |= ((timer_thrs << CE_DST_BATCH_TIMER_THRESH_LSB) &
1347 CE_DST_BATCH_TIMER_THRESH_MASK);
1348 }
1349
1350 CE_CHANNEL_DST_BATCH_TIMER_INT_SETUP(scn, ctrl_addr, tmp);
1351 CE_CHANNEL_DST_TIMER_BATCH_INT_EN(scn, ctrl_addr);
1352 }
1353 #else
1354 static void ce_legacy_msi_param_setup(struct hif_softc *scn, uint32_t ctrl_addr,
1355 uint32_t ce_id, struct CE_attr *attr)
1356 {
1357 }
1358
1359 static void ce_legacy_src_intr_thres_setup(struct hif_softc *scn,
1360 uint32_t ctrl_addr,
1361 struct CE_attr *attr,
1362 uint32_t timer_thrs,
1363 uint32_t count_thrs)
1364 {
1365 }
1366
1367 static void ce_legacy_dest_intr_thres_setup(struct hif_softc *scn,
1368 uint32_t ctrl_addr,
1369 struct CE_attr *attr,
1370 uint32_t timer_thrs,
1371 uint32_t count_thrs)
1372 {
1373 }
1374 #endif /* QCA_WIFI_WCN6450 */
1375
1376 static void ce_legacy_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
1377 struct CE_ring_state *src_ring,
1378 struct CE_attr *attr)
1379 {
1380 uint32_t ctrl_addr;
1381 uint64_t dma_addr;
1382 uint32_t timer_thrs;
1383 uint32_t count_thrs;
1384
1385 QDF_ASSERT(ce_id < scn->ce_count);
1386 ctrl_addr = CE_BASE_ADDRESS(ce_id);
1387
1388 src_ring->hw_index =
1389 CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
1390 src_ring->sw_index = src_ring->hw_index;
1391 src_ring->write_index =
1392 CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
1393 dma_addr = src_ring->base_addr_CE_space;
1394 CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
1395 (uint32_t)(dma_addr & 0xFFFFFFFF));
1396
1397 /* if SR_BA_ADDRESS_HIGH register exists */
1398 if (is_register_supported(SR_BA_ADDRESS_HIGH)) {
1399 uint32_t tmp;
1400
1401 tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
1402 scn, ctrl_addr);
1403 tmp &= ~CE_RING_BASE_ADDR_HIGH_MASK;
1404 dma_addr =
1405 ((dma_addr >> 32) & CE_RING_BASE_ADDR_HIGH_MASK) | tmp;
1406 CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
1407 ctrl_addr, (uint32_t)dma_addr);
1408 }
1409 CE_SRC_RING_SZ_SET(scn, ctrl_addr, src_ring->nentries);
1410 CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
1411 #ifdef BIG_ENDIAN_HOST
1412 /* Enable source ring byte swap for big endian host */
1413 CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
1414 #endif
1415 CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
1416 CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, src_ring->nentries);
1417
1418 if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
1419 /* In 8us units */
1420 timer_thrs = CE_SRC_BATCH_TIMER_THRESHOLD >> 3;
1421 count_thrs = CE_SRC_BATCH_COUNTER_THRESHOLD;
1422
1423 ce_legacy_msi_param_setup(scn, ctrl_addr, ce_id, attr);
1424 ce_legacy_src_intr_thres_setup(scn, ctrl_addr, attr,
1425 timer_thrs, count_thrs);
1426 }
1427 }
1428
1429 static void ce_legacy_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
1430 struct CE_ring_state *dest_ring,
1431 struct CE_attr *attr)
1432 {
1433 uint32_t ctrl_addr;
1434 uint64_t dma_addr;
1435 uint32_t timer_thrs;
1436 uint32_t count_thrs;
1437
1438 QDF_ASSERT(ce_id < scn->ce_count);
1439 ctrl_addr = CE_BASE_ADDRESS(ce_id);
1440 dest_ring->sw_index =
1441 CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
1442 dest_ring->write_index =
1443 CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
1444 dma_addr = dest_ring->base_addr_CE_space;
1445 CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
1446 (uint32_t)(dma_addr & 0xFFFFFFFF));
1447
1448 /* if DR_BA_ADDRESS_HIGH exists */
1449 if (is_register_supported(DR_BA_ADDRESS_HIGH)) {
1450 uint32_t tmp;
1451
1452 tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
1453 ctrl_addr);
1454 tmp &= ~CE_RING_BASE_ADDR_HIGH_MASK;
1455 dma_addr =
1456 ((dma_addr >> 32) & CE_RING_BASE_ADDR_HIGH_MASK) | tmp;
1457 CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
1458 ctrl_addr, (uint32_t)dma_addr);
1459 }
1460
1461 CE_DEST_RING_SZ_SET(scn, ctrl_addr, dest_ring->nentries);
1462 #ifdef BIG_ENDIAN_HOST
1463 /* Enable Dest ring byte swap for big endian host */
1464 CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
1465 #endif
1466 CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
1467 CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, dest_ring->nentries);
1468
1469 if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
1470 /* In 8us units */
1471 timer_thrs = CE_DST_BATCH_TIMER_THRESHOLD >> 3;
1472 count_thrs = CE_DST_BATCH_COUNTER_THRESHOLD;
1473
1474 ce_legacy_msi_param_setup(scn, ctrl_addr, ce_id, attr);
1475 ce_legacy_dest_intr_thres_setup(scn, ctrl_addr, attr,
1476 timer_thrs, count_thrs);
1477 }
1478 }
1479
1480 static uint32_t ce_get_desc_size_legacy(uint8_t ring_type)
1481 {
1482 switch (ring_type) {
1483 case CE_RING_SRC:
1484 return sizeof(struct CE_src_desc);
1485 case CE_RING_DEST:
1486 return sizeof(struct CE_dest_desc);
1487 case CE_RING_STATUS:
1488 qdf_assert(0);
1489 return 0;
1490 default:
1491 return 0;
1492 }
1493
1494 return 0;
1495 }
1496
1497 static int ce_ring_setup_legacy(struct hif_softc *scn, uint8_t ring_type,
1498 uint32_t ce_id, struct CE_ring_state *ring,
1499 struct CE_attr *attr)
1500 {
1501 int status = Q_TARGET_ACCESS_BEGIN(scn);
1502
1503 if (status < 0)
1504 goto out;
1505
1506 switch (ring_type) {
1507 case CE_RING_SRC:
1508 ce_legacy_src_ring_setup(scn, ce_id, ring, attr);
1509 break;
1510 case CE_RING_DEST:
1511 ce_legacy_dest_ring_setup(scn, ce_id, ring, attr);
1512 break;
1513 case CE_RING_STATUS:
1514 default:
1515 qdf_assert(0);
1516 break;
1517 }
1518
1519 Q_TARGET_ACCESS_END(scn);
1520 out:
1521 return status;
1522 }
1523
1524 static void ce_prepare_shadow_register_v2_cfg_legacy(struct hif_softc *scn,
1525 struct pld_shadow_reg_v2_cfg **shadow_config,
1526 int *num_shadow_registers_configured)
1527 {
1528 *num_shadow_registers_configured = 0;
1529 *shadow_config = NULL;
1530 }
1531
1532 static bool ce_check_int_watermark(struct CE_state *CE_state,
1533 unsigned int *flags)
1534 {
1535 uint32_t ce_int_status;
1536 uint32_t ctrl_addr = CE_state->ctrl_addr;
1537 struct hif_softc *scn = CE_state->scn;
1538
1539 ce_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1540 if (ce_int_status & CE_WATERMARK_MASK) {
1541 /* Convert HW IS bits to software flags */
1542 *flags =
1543 (ce_int_status & CE_WATERMARK_MASK) >>
1544 CE_WM_SHFT;
1545 return true;
1546 }
1547
1548 return false;
1549 }
1550
1551 void hif_display_ctrl_traffic_pipes_state(struct hif_opaque_softc *hif_ctx) { }
1552
1553 #ifdef HIF_CE_LOG_INFO
1554 /**
1555 * ce_get_index_info_legacy(): Get CE index info
1556 * @scn: HIF Context
1557 * @ce_state: CE opaque handle
1558 * @info: CE info
1559 *
1560 * Return: 0 for success and non zero for failure
1561 */
1562 static
1563 int ce_get_index_info_legacy(struct hif_softc *scn, void *ce_state,
1564 struct ce_index *info)
1565 {
1566 struct CE_state *state = (struct CE_state *)ce_state;
1567
1568 info->id = state->id;
1569 if (state->src_ring) {
1570 info->u.legacy_info.sw_index = state->src_ring->sw_index;
1571 info->u.legacy_info.write_index = state->src_ring->write_index;
1572 } else if (state->dest_ring) {
1573 info->u.legacy_info.sw_index = state->dest_ring->sw_index;
1574 info->u.legacy_info.write_index = state->dest_ring->write_index;
1575 }
1576
1577 return 0;
1578 }
1579 #endif
1580
1581 #ifdef CONFIG_SHADOW_V3
1582 static void ce_prepare_shadow_register_v3_cfg_legacy(struct hif_softc *scn,
1583 struct pld_shadow_reg_v3_cfg **shadow_config,
1584 int *num_shadow_registers_configured)
1585 {
1586 hif_get_shadow_reg_config_v3(scn, shadow_config,
1587 num_shadow_registers_configured);
1588
1589 if (*num_shadow_registers_configured != 0) {
1590 hif_err("shadow register configuration already constructed");
1591 return;
1592 }
1593
1594 hif_preare_shadow_register_cfg_v3(scn);
1595 hif_get_shadow_reg_config_v3(scn, shadow_config,
1596 num_shadow_registers_configured);
1597 }
1598 #endif
1599
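/*
 * Legacy CE service ops table: handed out via ce_services_legacy() and
 * registered with the CE service layer in ce_service_legacy_init() below.
 */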
1600 struct ce_ops ce_service_legacy = {
1601 .ce_get_desc_size = ce_get_desc_size_legacy,
1602 .ce_ring_setup = ce_ring_setup_legacy,
1603 .ce_sendlist_send = ce_sendlist_send_legacy,
1604 .ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_legacy,
1605 .ce_revoke_recv_next = ce_revoke_recv_next_legacy,
1606 .ce_cancel_send_next = ce_cancel_send_next_legacy,
1607 .ce_recv_buf_enqueue = ce_recv_buf_enqueue_legacy,
1608 .ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_legacy,
1609 .ce_send_nolock = ce_send_nolock_legacy,
1610 .watermark_int = ce_check_int_watermark,
1611 .ce_completed_send_next_nolock = ce_completed_send_next_nolock_legacy,
1612 .ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_legacy,
1613 .ce_send_entries_done_nolock = ce_send_entries_done_nolock_legacy,
1614 .ce_prepare_shadow_register_v2_cfg =
1615 ce_prepare_shadow_register_v2_cfg_legacy,
1616 #ifdef HIF_CE_LOG_INFO
1617 .ce_get_index_info =
1618 ce_get_index_info_legacy,
1619 #endif
1620 #ifdef CONFIG_SHADOW_V3
1621 .ce_prepare_shadow_register_v3_cfg =
1622 ce_prepare_shadow_register_v3_cfg_legacy,
1623 #endif
1624 };
1625
1626 struct ce_ops *ce_services_legacy(void)
1627 {
1628 return &ce_service_legacy;
1629 }
1630
1631 qdf_export_symbol(ce_services_legacy);
1632
1633 void ce_service_legacy_init(void)
1634 {
1635 ce_service_register_module(CE_SVC_LEGACY, &ce_services_legacy);
1636 }
1637