1 /*
2 * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
3 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for
6 * any purpose with or without fee is hereby granted, provided that the
7 * above copyright notice and this permission notice appear in all
8 * copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include "i_bmi.h"
21 #include "cds_api.h"
22
23 /* APIs visible to the driver */
24
bmi_init(struct ol_context * ol_ctx)25 QDF_STATUS bmi_init(struct ol_context *ol_ctx)
26 {
27 struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
28 struct hif_opaque_softc *scn = ol_ctx->scn;
29 qdf_device_t qdf_dev = ol_ctx->qdf_dev;
30
31 if (!scn) {
32 BMI_ERR("Invalid scn Context");
33 bmi_assert(0);
34 return QDF_STATUS_NOT_INITIALIZED;
35 }
36
37 if (!qdf_dev->dev) {
38 BMI_ERR("%s: Invalid Device Pointer", __func__);
39 return QDF_STATUS_NOT_INITIALIZED;
40 }
41
42 info->bmi_done = false;
43
44 if (!info->bmi_cmd_buff) {
45 info->bmi_cmd_buff =
46 qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
47 MAX_BMI_CMDBUF_SZ,
48 &info->bmi_cmd_da);
49 if (!info->bmi_cmd_buff) {
50 BMI_ERR("No Memory for BMI Command");
51 return QDF_STATUS_E_NOMEM;
52 }
53 }
54
55 if (!info->bmi_rsp_buff) {
56 info->bmi_rsp_buff =
57 qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
58 MAX_BMI_CMDBUF_SZ,
59 &info->bmi_rsp_da);
60 if (!info->bmi_rsp_buff) {
61 BMI_ERR("No Memory for BMI Response");
62 goto end;
63 }
64 }
65 return QDF_STATUS_SUCCESS;
66 end:
67 qdf_mem_free_consistent(qdf_dev, qdf_dev->dev, MAX_BMI_CMDBUF_SZ,
68 info->bmi_cmd_buff, info->bmi_cmd_da, 0);
69 info->bmi_cmd_buff = NULL;
70 return QDF_STATUS_E_NOMEM;
71 }
72
bmi_cleanup(struct ol_context * ol_ctx)73 void bmi_cleanup(struct ol_context *ol_ctx)
74 {
75 struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
76 qdf_device_t qdf_dev;
77
78 if (!info || !ol_ctx) {
79 BMI_WARN("%s: no bmi to cleanup", __func__);
80 return;
81 }
82
83 qdf_dev = ol_ctx->qdf_dev;
84 if (!qdf_dev || !qdf_dev->dev) {
85 BMI_ERR("%s: Invalid Device Pointer", __func__);
86 return;
87 }
88
89 if (info->bmi_cmd_buff) {
90 qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
91 MAX_BMI_CMDBUF_SZ,
92 info->bmi_cmd_buff, info->bmi_cmd_da, 0);
93 info->bmi_cmd_buff = NULL;
94 info->bmi_cmd_da = 0;
95 }
96
97 if (info->bmi_rsp_buff) {
98 qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
99 MAX_BMI_CMDBUF_SZ,
100 info->bmi_rsp_buff, info->bmi_rsp_da, 0);
101 info->bmi_rsp_buff = NULL;
102 info->bmi_rsp_da = 0;
103 }
104 }
105
106 /**
107 * bmi_done() - finish the bmi operation
108 * @ol_ctx: the bmi context
109 *
110 * does some sanity checking.
111 * exchanges one last message with firmware.
112 * frees some buffers.
113 *
114 * Return: QDF_STATUS_SUCCESS if bmi isn't needed.
115 * QDF_STATUS_SUCCESS if bmi finishes.
116 * otherwise returns failure.
117 */
bmi_done(struct ol_context * ol_ctx)118 QDF_STATUS bmi_done(struct ol_context *ol_ctx)
119 {
120 QDF_STATUS status = QDF_STATUS_SUCCESS;
121
122 if (NO_BMI)
123 return QDF_STATUS_SUCCESS;
124
125 if (!ol_ctx) {
126 BMI_ERR("%s: null context", __func__);
127 return QDF_STATUS_E_NOMEM;
128 }
129 hif_claim_device(ol_ctx->scn);
130
131 if (!hif_needs_bmi(ol_ctx->scn))
132 return QDF_STATUS_SUCCESS;
133
134 status = bmi_done_local(ol_ctx);
135 if (status != QDF_STATUS_SUCCESS)
136 BMI_ERR("BMI_DONE Failed status:%d", status);
137
138 return status;
139 }
140
/**
 * bmi_target_ready() - forward the target-ready indication to the OL layer
 * @scn: HIF handle of the target that became ready
 * @cfg_ctx: opaque configuration context passed through unchanged
 *
 * Thin wrapper: simply delegates to ol_target_ready().
 *
 * Return: void
 */
void bmi_target_ready(struct hif_opaque_softc *scn, void *cfg_ctx)
{
	ol_target_ready(scn, cfg_ctx);
}
145
146 static QDF_STATUS
bmi_get_target_info_message_based(struct bmi_target_info * targ_info,struct ol_context * ol_ctx)147 bmi_get_target_info_message_based(struct bmi_target_info *targ_info,
148 struct ol_context *ol_ctx)
149 {
150 int status = 0;
151 struct hif_opaque_softc *scn = ol_ctx->scn;
152 struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
153 uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
154 uint8_t *bmi_rsp_buff = info->bmi_rsp_buff;
155 uint32_t cid, length;
156 qdf_dma_addr_t cmd = info->bmi_cmd_da;
157 qdf_dma_addr_t rsp = info->bmi_rsp_da;
158
159 if (!bmi_cmd_buff || !bmi_rsp_buff) {
160 BMI_ERR("%s:BMI CMD/RSP Buffer is NULL", __func__);
161 return QDF_STATUS_NOT_INITIALIZED;
162 }
163
164 cid = BMI_GET_TARGET_INFO;
165
166 qdf_mem_copy(bmi_cmd_buff, &cid, sizeof(cid));
167 length = sizeof(struct bmi_target_info);
168
169 status = hif_exchange_bmi_msg(scn, cmd, rsp, bmi_cmd_buff, sizeof(cid),
170 (uint8_t *)bmi_rsp_buff, &length,
171 BMI_EXCHANGE_TIMEOUT_MS);
172 if (status) {
173 BMI_ERR("Failed to target info: status:%d", status);
174 return QDF_STATUS_E_FAILURE;
175 }
176
177 qdf_mem_copy(targ_info, bmi_rsp_buff, length);
178 return QDF_STATUS_SUCCESS;
179 }
180
181 QDF_STATUS
bmi_get_target_info(struct bmi_target_info * targ_info,struct ol_context * ol_ctx)182 bmi_get_target_info(struct bmi_target_info *targ_info,
183 struct ol_context *ol_ctx)
184 {
185 struct hif_opaque_softc *scn = ol_ctx->scn;
186 struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
187 QDF_STATUS status;
188
189 if (info->bmi_done) {
190 BMI_ERR("BMI Phase is Already Done");
191 return QDF_STATUS_E_PERM;
192 }
193
194 switch (hif_get_bus_type(scn)) {
195 case QDF_BUS_TYPE_PCI:
196 case QDF_BUS_TYPE_SNOC:
197 case QDF_BUS_TYPE_USB:
198 status = bmi_get_target_info_message_based(targ_info, ol_ctx);
199 break;
200 #ifdef HIF_SDIO
201 case QDF_BUS_TYPE_SDIO:
202 status = hif_reg_based_get_target_info(scn, targ_info);
203 break;
204 #endif
205 default:
206 status = QDF_STATUS_E_FAILURE;
207 break;
208 }
209 return status;
210 }
211
bmi_download_firmware(struct ol_context * ol_ctx)212 QDF_STATUS bmi_download_firmware(struct ol_context *ol_ctx)
213 {
214 struct hif_opaque_softc *scn;
215
216 if (!ol_ctx) {
217 if (NO_BMI) {
218 /* ol_ctx is not allocated in NO_BMI case */
219 return QDF_STATUS_SUCCESS;
220 }
221
222 BMI_ERR("ol_ctx is NULL");
223 bmi_assert(0);
224 return QDF_STATUS_NOT_INITIALIZED;
225 }
226
227 scn = ol_ctx->scn;
228
229 if (!scn) {
230 BMI_ERR("Invalid scn context");
231 bmi_assert(0);
232 return QDF_STATUS_NOT_INITIALIZED;
233 }
234
235 if (!hif_needs_bmi(scn))
236 return QDF_STATUS_SUCCESS;
237 else
238 hif_register_bmi_callbacks(scn);
239
240 return bmi_firmware_download(ol_ctx);
241 }
242
bmi_read_soc_register(uint32_t address,uint32_t * param,struct ol_context * ol_ctx)243 QDF_STATUS bmi_read_soc_register(uint32_t address, uint32_t *param,
244 struct ol_context *ol_ctx)
245 {
246 struct hif_opaque_softc *scn = ol_ctx->scn;
247 uint32_t cid;
248 int status;
249 uint32_t offset, param_len;
250 struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
251 uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
252 uint8_t *bmi_rsp_buff = info->bmi_rsp_buff;
253 qdf_dma_addr_t cmd = info->bmi_cmd_da;
254 qdf_dma_addr_t rsp = info->bmi_rsp_da;
255
256 bmi_assert(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address)));
257 qdf_mem_zero(bmi_cmd_buff, sizeof(cid) + sizeof(address));
258 qdf_mem_zero(bmi_rsp_buff, sizeof(cid) + sizeof(address));
259
260 if (info->bmi_done) {
261 BMI_DBG("Command disallowed");
262 return QDF_STATUS_E_PERM;
263 }
264
265 BMI_DBG("BMI Read SOC Register:device: 0x%pK, address: 0x%x",
266 scn, address);
267
268 cid = BMI_READ_SOC_REGISTER;
269
270 offset = 0;
271 qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
272 offset += sizeof(cid);
273 qdf_mem_copy(&(bmi_cmd_buff[offset]), &address, sizeof(address));
274 offset += sizeof(address);
275 param_len = sizeof(*param);
276 status = hif_exchange_bmi_msg(scn, cmd, rsp, bmi_cmd_buff, offset,
277 bmi_rsp_buff, ¶m_len, BMI_EXCHANGE_TIMEOUT_MS);
278 if (status) {
279 BMI_DBG("Unable to read from the device; status:%d", status);
280 return QDF_STATUS_E_FAILURE;
281 }
282 qdf_mem_copy(param, bmi_rsp_buff, sizeof(*param));
283
284 BMI_DBG("BMI Read SOC Register: Exit value: %d", *param);
285 return QDF_STATUS_SUCCESS;
286 }
287
bmi_write_soc_register(uint32_t address,uint32_t param,struct ol_context * ol_ctx)288 QDF_STATUS bmi_write_soc_register(uint32_t address, uint32_t param,
289 struct ol_context *ol_ctx)
290 {
291 struct hif_opaque_softc *scn = ol_ctx->scn;
292 uint32_t cid;
293 int status;
294 uint32_t offset;
295 struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
296 uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
297 uint32_t size = sizeof(cid) + sizeof(address) + sizeof(param);
298 qdf_dma_addr_t cmd = info->bmi_cmd_da;
299 qdf_dma_addr_t rsp = info->bmi_rsp_da;
300
301 bmi_assert(BMI_COMMAND_FITS(size));
302 qdf_mem_zero(bmi_cmd_buff, size);
303
304 if (info->bmi_done) {
305 BMI_DBG("Command disallowed");
306 return QDF_STATUS_E_FAILURE;
307 }
308
309 BMI_DBG("SOC Register Write:device:0x%pK, addr:0x%x, param:%d",
310 scn, address, param);
311
312 cid = BMI_WRITE_SOC_REGISTER;
313
314 offset = 0;
315 qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
316 offset += sizeof(cid);
317 qdf_mem_copy(&(bmi_cmd_buff[offset]), &address, sizeof(address));
318 offset += sizeof(address);
319 qdf_mem_copy(&(bmi_cmd_buff[offset]), ¶m, sizeof(param));
320 offset += sizeof(param);
321 status = hif_exchange_bmi_msg(scn, cmd, rsp, bmi_cmd_buff, offset,
322 NULL, NULL, 0);
323 if (status) {
324 BMI_ERR("Unable to write to the device: status:%d", status);
325 return QDF_STATUS_E_FAILURE;
326 }
327
328 BMI_DBG("BMI Read SOC Register: Exit");
329 return QDF_STATUS_SUCCESS;
330 }
331
332 static QDF_STATUS
bmilz_data(uint8_t * buffer,uint32_t length,struct ol_context * ol_ctx)333 bmilz_data(uint8_t *buffer, uint32_t length, struct ol_context *ol_ctx)
334 {
335 uint32_t cid;
336 int status;
337 uint32_t offset;
338 uint32_t remaining, txlen;
339 const uint32_t header = sizeof(cid) + sizeof(length);
340 struct hif_opaque_softc *scn = ol_ctx->scn;
341 struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
342 uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
343 qdf_dma_addr_t cmd = info->bmi_cmd_da;
344 qdf_dma_addr_t rsp = info->bmi_rsp_da;
345
346 bmi_assert(BMI_COMMAND_FITS(BMI_DATASZ_MAX + header));
347 qdf_mem_zero(bmi_cmd_buff, BMI_DATASZ_MAX + header);
348
349 if (info->bmi_done) {
350 BMI_ERR("Command disallowed");
351 return QDF_STATUS_E_PERM;
352 }
353
354 BMI_DBG("BMI Send LZ Data: device: 0x%pK, length: %d",
355 scn, length);
356
357 cid = BMI_LZ_DATA;
358
359 remaining = length;
360 while (remaining) {
361 txlen = (remaining < (BMI_DATASZ_MAX - header)) ?
362 remaining : (BMI_DATASZ_MAX - header);
363 offset = 0;
364 qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
365 offset += sizeof(cid);
366 qdf_mem_copy(&(bmi_cmd_buff[offset]), &txlen, sizeof(txlen));
367 offset += sizeof(txlen);
368 qdf_mem_copy(&(bmi_cmd_buff[offset]),
369 &buffer[length - remaining], txlen);
370 offset += txlen;
371 status = hif_exchange_bmi_msg(scn, cmd, rsp,
372 bmi_cmd_buff, offset,
373 NULL, NULL, 0);
374 if (status) {
375 BMI_ERR("Failed to write to the device: status:%d",
376 status);
377 return QDF_STATUS_E_FAILURE;
378 }
379 remaining -= txlen;
380 }
381
382 BMI_DBG("BMI LZ Data: Exit");
383
384 return QDF_STATUS_SUCCESS;
385 }
386
bmi_sign_stream_start(uint32_t address,uint8_t * buffer,uint32_t length,struct ol_context * ol_ctx)387 QDF_STATUS bmi_sign_stream_start(uint32_t address, uint8_t *buffer,
388 uint32_t length, struct ol_context *ol_ctx)
389 {
390 uint32_t cid;
391 int status;
392 uint32_t offset;
393 const uint32_t header = sizeof(cid) + sizeof(address) + sizeof(length);
394 uint8_t aligned_buf[BMI_DATASZ_MAX + 4];
395 uint8_t *src;
396 struct hif_opaque_softc *scn = ol_ctx->scn;
397 struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
398 uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
399 uint32_t remaining, txlen;
400 qdf_dma_addr_t cmd = info->bmi_cmd_da;
401 qdf_dma_addr_t rsp = info->bmi_rsp_da;
402
403 bmi_assert(BMI_COMMAND_FITS(BMI_DATASZ_MAX + header));
404 qdf_mem_zero(bmi_cmd_buff, BMI_DATASZ_MAX + header);
405
406 if (info->bmi_done) {
407 BMI_ERR("Command disallowed");
408 return QDF_STATUS_E_PERM;
409 }
410
411 BMI_ERR("Sign Stream start:device:0x%pK, addr:0x%x, length:%d",
412 scn, address, length);
413
414 cid = BMI_SIGN_STREAM_START;
415 remaining = length;
416 while (remaining) {
417 src = &buffer[length - remaining];
418 if (remaining < (BMI_DATASZ_MAX - header)) {
419 if (remaining & 0x3) {
420 memcpy(aligned_buf, src, remaining);
421 remaining = remaining + (4 - (remaining & 0x3));
422 src = aligned_buf;
423 }
424 txlen = remaining;
425 } else {
426 txlen = (BMI_DATASZ_MAX - header);
427 }
428
429 offset = 0;
430 qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
431 offset += sizeof(cid);
432 qdf_mem_copy(&(bmi_cmd_buff[offset]), &address,
433 sizeof(address));
434 offset += sizeof(offset);
435 qdf_mem_copy(&(bmi_cmd_buff[offset]), &txlen, sizeof(txlen));
436 offset += sizeof(txlen);
437 qdf_mem_copy(&(bmi_cmd_buff[offset]), src, txlen);
438 offset += txlen;
439 status = hif_exchange_bmi_msg(scn, cmd, rsp,
440 bmi_cmd_buff, offset, NULL,
441 NULL, BMI_EXCHANGE_TIMEOUT_MS);
442 if (status) {
443 BMI_ERR("Unable to write to the device: status:%d",
444 status);
445 return QDF_STATUS_E_FAILURE;
446 }
447 remaining -= txlen;
448 }
449 BMI_DBG("BMI SIGN Stream Start: Exit");
450
451 return QDF_STATUS_SUCCESS;
452 }
453
454 static QDF_STATUS
bmilz_stream_start(uint32_t address,struct ol_context * ol_ctx)455 bmilz_stream_start(uint32_t address, struct ol_context *ol_ctx)
456 {
457 uint32_t cid;
458 int status;
459 uint32_t offset;
460 struct hif_opaque_softc *scn = ol_ctx->scn;
461 struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
462 uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
463 qdf_dma_addr_t cmd = info->bmi_cmd_da;
464 qdf_dma_addr_t rsp = info->bmi_rsp_da;
465
466 bmi_assert(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address)));
467 qdf_mem_zero(bmi_cmd_buff, sizeof(cid) + sizeof(address));
468
469 if (info->bmi_done) {
470 BMI_DBG("Command disallowed");
471 return QDF_STATUS_E_PERM;
472 }
473 BMI_DBG("BMI LZ Stream Start: (device: 0x%pK, address: 0x%x)",
474 scn, address);
475
476 cid = BMI_LZ_STREAM_START;
477 offset = 0;
478 qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
479 offset += sizeof(cid);
480 qdf_mem_copy(&(bmi_cmd_buff[offset]), &address, sizeof(address));
481 offset += sizeof(address);
482 status = hif_exchange_bmi_msg(scn, cmd, rsp, bmi_cmd_buff, offset,
483 NULL, NULL, 0);
484 if (status) {
485 BMI_ERR("Unable to Start LZ Stream to the device status:%d",
486 status);
487 return QDF_STATUS_E_FAILURE;
488 }
489 BMI_DBG("BMI LZ Stream: Exit");
490 return QDF_STATUS_SUCCESS;
491 }
492
493 QDF_STATUS
bmi_fast_download(uint32_t address,uint8_t * buffer,uint32_t length,struct ol_context * ol_ctx)494 bmi_fast_download(uint32_t address, uint8_t *buffer,
495 uint32_t length, struct ol_context *ol_ctx)
496 {
497 QDF_STATUS status = QDF_STATUS_E_FAILURE;
498 uint32_t last_word = 0;
499 uint32_t last_word_offset = length & ~0x3;
500 uint32_t unaligned_bytes = length & 0x3;
501
502 status = bmilz_stream_start(address, ol_ctx);
503 if (status != QDF_STATUS_SUCCESS)
504 goto end;
505
506 /* copy the last word into a zero padded buffer */
507 if (unaligned_bytes)
508 qdf_mem_copy(&last_word, &buffer[last_word_offset],
509 unaligned_bytes);
510
511 status = bmilz_data(buffer, last_word_offset, ol_ctx);
512
513 if (status != QDF_STATUS_SUCCESS)
514 goto end;
515
516 if (unaligned_bytes)
517 status = bmilz_data((uint8_t *) &last_word, 4, ol_ctx);
518
519 if (status != QDF_STATUS_SUCCESS)
520 /*
521 * Close compressed stream and open a new (fake) one.
522 * This serves mainly to flush Target caches.
523 */
524 status = bmilz_stream_start(0x00, ol_ctx);
525 end:
526 return status;
527 }
528
529 /**
530 * ol_cds_init() - API to initialize global CDS OL Context
531 * @qdf_dev: QDF Device
532 * @hif_ctx: HIF Context
533 *
534 * Return: Success/Failure
535 */
ol_cds_init(qdf_device_t qdf_dev,void * hif_ctx)536 QDF_STATUS ol_cds_init(qdf_device_t qdf_dev, void *hif_ctx)
537 {
538 struct ol_context *ol_info;
539 QDF_STATUS status = QDF_STATUS_SUCCESS;
540
541 if (NO_BMI)
542 return QDF_STATUS_SUCCESS; /* no BMI for Q6 bring up */
543
544 status = cds_alloc_context(QDF_MODULE_ID_BMI,
545 (void **)&ol_info, sizeof(*ol_info));
546
547 if (status != QDF_STATUS_SUCCESS) {
548 BMI_ERR("%s: CDS Allocation failed for ol_bmi context",
549 __func__);
550 return status;
551 }
552
553 ol_info->qdf_dev = qdf_dev;
554 ol_info->scn = hif_ctx;
555 ol_info->tgt_def.targetdef = hif_get_targetdef(hif_ctx);
556
557 qdf_create_work(qdf_dev, &ol_info->ramdump_work,
558 ramdump_work_handler, ol_info);
559 qdf_create_work(qdf_dev, &ol_info->fw_indication_work,
560 fw_indication_work_handler, ol_info);
561
562 qdf_wake_lock_create(&ol_info->fw_dl_wakelock,
563 "fw_download_wakelock");
564
565 return status;
566 }
567
568 /**
569 * ol_cds_free() - API to free the global CDS OL Context
570 *
571 * Return: void
572 */
ol_cds_free(void)573 void ol_cds_free(void)
574 {
575 struct ol_context *ol_info = cds_get_context(QDF_MODULE_ID_BMI);
576
577 if (NO_BMI)
578 return;
579
580 qdf_wake_lock_destroy(&ol_info->fw_dl_wakelock);
581 cds_free_context(QDF_MODULE_ID_BMI, ol_info);
582 }
583