/*
 * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
23 #include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "ce_bmi.h"
#include "qdf_trace.h"
#include "hif_debug.h"
#include "bmi_msg.h"
#include "qdf_module.h"

/* Track a BMI transaction that is in progress */
#ifndef BIT
#define BIT(n) (1 << (n))
#endif

enum {
	BMI_REQ_SEND_DONE = BIT(0),   /* BMI request send completed */
	BMI_RESP_RECV_DONE = BIT(1),  /* BMI response has been received */
};

struct BMI_transaction {
	struct HIF_CE_state *hif_state;
	qdf_semaphore_t bmi_transaction_sem;
	uint8_t *bmi_request_host;        /* Req BMI msg in Host addr space */
	qdf_dma_addr_t bmi_request_CE;    /* Req BMI msg in CE addr space */
	uint32_t bmi_request_length;      /* Length of BMI request */
	uint8_t *bmi_response_host;       /* Rsp BMI msg in Host addr space */
	qdf_dma_addr_t bmi_response_CE;   /* Rsp BMI msg in CE addr space */
	unsigned int bmi_response_length; /* Length of received response */
	unsigned int bmi_timeout_ms;
	uint32_t bmi_transaction_flags;   /* flags for the transaction */
};

/*
 * send/recv completion functions for BMI.
 * NB: The "net_buf" parameter is actually just a
 * straight buffer, not an sk_buff.
 */
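/**
 * hif_bmi_send_done() - CE send-completion callback for a BMI request
 * @copyeng: copy engine handle for the target-bound BMI pipe
 * @ce_context: per-pipe context registered with the copy engine (unused here)
 * @transfer_context: the struct BMI_transaction for this exchange
 * @data: CE (bus) address of the request buffer
 * @nbytes: number of bytes sent
 * @transfer_id: transaction id passed to ce_send()
 * @sw_index: CE source ring software index
 * @hw_index: CE source ring hardware index
 * @toeplitz_hash_result: Toeplitz hash of the sent data
 *
 * Marks the request as sent and releases bmi_transaction_sem once no
 * response is expected or the response has already been received. When
 * BMI_RSP_POLLING is defined, the semaphore is released unconditionally.
 */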
void hif_bmi_send_done(struct CE_handle *copyeng, void *ce_context,
		       void *transfer_context, qdf_dma_addr_t data,
		       unsigned int nbytes,
		       unsigned int transfer_id, unsigned int sw_index,
		       unsigned int hw_index, uint32_t toeplitz_hash_result)
{
	struct BMI_transaction *transaction =
		(struct BMI_transaction *)transfer_context;

#ifdef BMI_RSP_POLLING
	/*
	 * Fix EV118783: release the semaphore after sending,
	 * regardless of whether a response is expected.
	 */
	qdf_semaphore_release(&transaction->bmi_transaction_sem);
#else
	/*
	 * If a response is anticipated, we'll complete the
	 * transaction if the response has been received.
	 * If no response is anticipated, complete the
	 * transaction now.
	 */
	transaction->bmi_transaction_flags |= BMI_REQ_SEND_DONE;

	/* The response isn't needed or has already been received;
	 * never assume the response arrives later than this point.
	 */
	if (!transaction->bmi_response_CE ||
	    (transaction->bmi_transaction_flags & BMI_RESP_RECV_DONE)) {
		qdf_semaphore_release(&transaction->bmi_transaction_sem);
	}
#endif
}

#ifndef BMI_RSP_POLLING
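/**
 * hif_bmi_recv_data() - CE recv-completion callback for a BMI response
 * @copyeng: copy engine handle for the host-bound BMI pipe
 * @ce_context: per-pipe context registered with the copy engine (unused here)
 * @transfer_context: the struct BMI_transaction for this exchange
 * @data: CE (bus) address of the response buffer
 * @nbytes: number of bytes received
 * @transfer_id: transaction id of the completed receive
 * @flags: CE completion flags
 *
 * Records the response length and releases bmi_transaction_sem once the
 * matching request has also completed.
 */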
void hif_bmi_recv_data(struct CE_handle *copyeng, void *ce_context,
		       void *transfer_context, qdf_dma_addr_t data,
		       unsigned int nbytes,
		       unsigned int transfer_id, unsigned int flags)
{
	struct BMI_transaction *transaction =
		(struct BMI_transaction *)transfer_context;

	transaction->bmi_response_length = nbytes;
	transaction->bmi_transaction_flags |= BMI_RESP_RECV_DONE;

	/* when both send/recv are done, the sem can be released */
	if (transaction->bmi_transaction_flags & BMI_REQ_SEND_DONE)
		qdf_semaphore_release(&transaction->bmi_transaction_sem);
}
#endif

/* Timeout for BMI message exchange */
#define HIF_EXCHANGE_BMI_MSG_TIMEOUT 6000

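/**
 * hif_exchange_bmi_msg() - exchange a single BMI request/response pair
 * @hif_ctx: hif opaque context
 * @bmi_cmd_da: DMA (CE-space) address of the request buffer
 * @bmi_rsp_da: DMA (CE-space) address of the response buffer
 * @bmi_request: request buffer in host address space
 * @request_length: length of the request in bytes
 * @bmi_response: response buffer in host address space, or NULL if no
 *                response is expected
 * @bmi_response_lengthp: on success, filled with the received response length
 * @TimeoutMS: requested timeout in ms (stored in the transaction; the wait
 *             itself uses HIF_EXCHANGE_BMI_MSG_TIMEOUT)
 *
 * Sends the request on the target-bound BMI copy engine and, if a response
 * buffer is supplied, waits for the response on the host-bound BMI copy
 * engine (or polls for it when BMI_RSP_POLLING is defined).
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM if the
 * transaction cannot be allocated, QDF_STATUS_E_TIMEOUT if the exchange
 * times out, or QDF_STATUS_E_BUSY if polling for the response fails.
 */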
QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
				qdf_dma_addr_t bmi_cmd_da,
				qdf_dma_addr_t bmi_rsp_da,
				uint8_t *bmi_request,
				uint32_t request_length,
				uint8_t *bmi_response,
				uint32_t *bmi_response_lengthp,
				uint32_t TimeoutMS)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct HIF_CE_pipe_info *send_pipe_info =
		&(hif_state->pipe_info[BMI_CE_NUM_TO_TARG]);
	struct CE_handle *ce_send_hdl = send_pipe_info->ce_hdl;
	qdf_dma_addr_t CE_request, CE_response = 0;
	struct BMI_transaction *transaction = NULL;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct HIF_CE_pipe_info *recv_pipe_info =
		&(hif_state->pipe_info[BMI_CE_NUM_TO_HOST]);
	struct CE_handle *ce_recv = recv_pipe_info->ce_hdl;
	unsigned int mux_id = 0;
	unsigned int transaction_id = 0xffff;
	unsigned int user_flags = 0;
#ifdef BMI_RSP_POLLING
	qdf_dma_addr_t buf;
	unsigned int completed_nbytes, id, flags;
	int i;
#endif

	transaction =
		(struct BMI_transaction *)qdf_mem_malloc(sizeof(*transaction));
	if (unlikely(!transaction))
		return QDF_STATUS_E_NOMEM;

	transaction_id = (mux_id & MUX_ID_MASK) |
		(transaction_id & TRANSACTION_ID_MASK);
#ifdef QCA_WIFI_3_0
	user_flags &= DESC_DATA_FLAG_MASK;
#endif
	A_TARGET_ACCESS_LIKELY(scn);

	/* Initialize bmi_transaction_sem to block */
	qdf_semaphore_init(&transaction->bmi_transaction_sem);
	qdf_semaphore_acquire(&transaction->bmi_transaction_sem);

	transaction->hif_state = hif_state;
	transaction->bmi_request_host = bmi_request;
	transaction->bmi_request_length = request_length;
	transaction->bmi_response_length = 0;
	transaction->bmi_timeout_ms = TimeoutMS;
	transaction->bmi_transaction_flags = 0;

	/*
	 * CE_request = dma_map_single(dev,
	 * (void *)bmi_request, request_length, DMA_TO_DEVICE);
	 */
	CE_request = bmi_cmd_da;
	transaction->bmi_request_CE = CE_request;

	if (bmi_response) {

		/*
		 * CE_response = dma_map_single(dev, bmi_response,
		 * BMI_DATASZ_MAX, DMA_FROM_DEVICE);
		 */
		CE_response = bmi_rsp_da;
		transaction->bmi_response_host = bmi_response;
		transaction->bmi_response_CE = CE_response;
		/* dma_cache_sync(dev, bmi_response,
		 * BMI_DATASZ_MAX, DMA_FROM_DEVICE);
		 */
		qdf_mem_dma_sync_single_for_device(scn->qdf_dev,
						   CE_response,
						   BMI_DATASZ_MAX,
						   DMA_FROM_DEVICE);
		ce_recv_buf_enqueue(ce_recv, transaction,
				    transaction->bmi_response_CE);
		/* NB: see hif_bmi_recv_data */
	} else {
		transaction->bmi_response_host = NULL;
		transaction->bmi_response_CE = 0;
	}

	/* dma_cache_sync(dev, bmi_request, request_length, DMA_TO_DEVICE); */
	qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_request,
					   request_length, DMA_TO_DEVICE);

	status =
		ce_send(ce_send_hdl, transaction,
			CE_request, request_length,
			transaction_id, 0, user_flags);
	ASSERT(status == QDF_STATUS_SUCCESS);
	/* NB: see hif_bmi_send_done */

	/* TBDXXX: handle timeout */

	/* Wait for BMI request/response transaction to complete */
	/* Always just wait for the BMI request here if
	 * BMI_RSP_POLLING is defined
	 */
	if (qdf_semaphore_acquire_timeout
		       (&transaction->bmi_transaction_sem,
			HIF_EXCHANGE_BMI_MSG_TIMEOUT)) {
		hif_err("BMI transaction timeout. Please check the HW interface!!");
		qdf_mem_free(transaction);
		return QDF_STATUS_E_TIMEOUT;
	}

	if (bmi_response) {
#ifdef BMI_RSP_POLLING
		/* Fix EV118783: do not wait on a semaphore for the BMI
		 * response, since the corresponding interrupt may be lost;
		 * poll for the BMI response instead.
		 */
		i = 0;
		while (ce_completed_recv_next(
			    ce_recv, NULL, NULL, &buf,
			    &completed_nbytes, &id,
			    &flags) != QDF_STATUS_SUCCESS) {
			if (i++ > BMI_RSP_TO_MILLISEC) {
				hif_err("Can't get bmi response");
				status = QDF_STATUS_E_BUSY;
				break;
			}
			OS_DELAY(1000);
		}

		if ((status == QDF_STATUS_SUCCESS) && bmi_response_lengthp)
			*bmi_response_lengthp = completed_nbytes;
#else
		if ((status == QDF_STATUS_SUCCESS) && bmi_response_lengthp) {
			*bmi_response_lengthp =
				transaction->bmi_response_length;
		}
#endif

	}

	/* dma_unmap_single(dev, transaction->bmi_request_CE,
	 * request_length, DMA_TO_DEVICE);
	 * bus_unmap_single(scn->sc_osdev,
	 * transaction->bmi_request_CE,
	 * request_length, BUS_DMA_TODEVICE);
	 */

	if (status != QDF_STATUS_SUCCESS) {
		qdf_dma_addr_t unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;
		unsigned int toeplitz_hash_result;

		ce_cancel_send_next(ce_send_hdl,
				    NULL, NULL, &unused_buffer,
				    &unused_nbytes, &unused_id,
				    &toeplitz_hash_result);
	}

	A_TARGET_ACCESS_UNLIKELY(scn);
	qdf_mem_free(transaction);
	return status;
}
qdf_export_symbol(hif_exchange_bmi_msg);

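/*
 * When BMI_RSP_POLLING is defined, hif_exchange_bmi_msg() polls the
 * host-bound CE for the response directly, so no receive callback is
 * registered in hif_register_bmi_callbacks().
 */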
#ifdef BMI_RSP_POLLING
#define BMI_RSP_CB_REGISTER 0
#else
#define BMI_RSP_CB_REGISTER 1
#endif

/**
 * hif_register_bmi_callbacks() - register BMI callbacks
 * @hif_ctx: hif context
 *
 * The BMI phase uses different copy-complete callbacks than mission mode.
 */
void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_pipe_info *pipe_info;
	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	/*
	 * Initially, establish CE completion handlers for use with BMI.
	 * These are overwritten with generic handlers after we exit BMI phase.
	 */
	pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_TARG];
	ce_send_cb_register(pipe_info->ce_hdl, hif_bmi_send_done, pipe_info, 0);

	if (BMI_RSP_CB_REGISTER) {
		pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_HOST];
		ce_recv_cb_register(
			pipe_info->ce_hdl, hif_bmi_recv_data, pipe_info, 0);
	}
}