1*5113495bSYour Name /*
2*5113495bSYour Name * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
3*5113495bSYour Name * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4*5113495bSYour Name *
5*5113495bSYour Name * Permission to use, copy, modify, and/or distribute this software for
6*5113495bSYour Name * any purpose with or without fee is hereby granted, provided that the
7*5113495bSYour Name * above copyright notice and this permission notice appear in all
8*5113495bSYour Name * copies.
9*5113495bSYour Name *
10*5113495bSYour Name * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11*5113495bSYour Name * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12*5113495bSYour Name * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13*5113495bSYour Name * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14*5113495bSYour Name * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15*5113495bSYour Name * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16*5113495bSYour Name * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17*5113495bSYour Name * PERFORMANCE OF THIS SOFTWARE.
18*5113495bSYour Name */
19*5113495bSYour Name
20*5113495bSYour Name #ifdef CONFIG_SDIO_TRANSFER_MAILBOX
21*5113495bSYour Name #define ATH_MODULE_NAME hif
22*5113495bSYour Name #include <linux/kthread.h>
23*5113495bSYour Name #include <qdf_types.h>
24*5113495bSYour Name #include <qdf_status.h>
25*5113495bSYour Name #include <qdf_timer.h>
26*5113495bSYour Name #include <qdf_time.h>
27*5113495bSYour Name #include <qdf_lock.h>
28*5113495bSYour Name #include <qdf_mem.h>
29*5113495bSYour Name #include <qdf_util.h>
30*5113495bSYour Name #include <qdf_defer.h>
31*5113495bSYour Name #include <qdf_atomic.h>
32*5113495bSYour Name #include <qdf_nbuf.h>
33*5113495bSYour Name #include <qdf_threads.h>
34*5113495bSYour Name #include <athdefs.h>
35*5113495bSYour Name #include <qdf_net_types.h>
36*5113495bSYour Name #include <a_types.h>
37*5113495bSYour Name #include <athdefs.h>
38*5113495bSYour Name #include <a_osapi.h>
39*5113495bSYour Name #include <hif.h>
40*5113495bSYour Name #include <htc_internal.h>
41*5113495bSYour Name #include <htc_services.h>
42*5113495bSYour Name #include <a_debug.h>
43*5113495bSYour Name #include "hif_sdio_internal.h"
44*5113495bSYour Name #include "if_sdio.h"
45*5113495bSYour Name #include "regtable.h"
46*5113495bSYour Name #include "transfer.h"
47*5113495bSYour Name
48*5113495bSYour Name /*
49*5113495bSYour Name * The following commit was introduced in v5.17:
50*5113495bSYour Name * cead18552660 ("exit: Rename complete_and_exit to kthread_complete_and_exit")
51*5113495bSYour Name * Use the old name for kernels before 5.17
52*5113495bSYour Name */
53*5113495bSYour Name #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0))
54*5113495bSYour Name #define kthread_complete_and_exit(c, s) complete_and_exit(c, s)
55*5113495bSYour Name #endif
56*5113495bSYour Name
57*5113495bSYour Name /* by default setup a bounce buffer for the data packets,
58*5113495bSYour Name * if the underlying host controller driver
59*5113495bSYour Name * does not use DMA you may be able to skip this step
60*5113495bSYour Name * and save the memory allocation and transfer time
61*5113495bSYour Name */
62*5113495bSYour Name #define HIF_USE_DMA_BOUNCE_BUFFER 1
63*5113495bSYour Name #if HIF_USE_DMA_BOUNCE_BUFFER
64*5113495bSYour Name /* macro to check if DMA buffer is WORD-aligned and DMA-able.
65*5113495bSYour Name * Most host controllers assume the
66*5113495bSYour Name * buffer is DMA'able and will bug-check otherwise (i.e. buffers on the stack).
67*5113495bSYour Name * virt_addr_valid check fails on stack memory.
68*5113495bSYour Name */
69*5113495bSYour Name #define BUFFER_NEEDS_BOUNCE(buffer) (((unsigned long)(buffer) & 0x3) || \
70*5113495bSYour Name !virt_addr_valid((buffer)))
71*5113495bSYour Name #else
72*5113495bSYour Name #define BUFFER_NEEDS_BOUNCE(buffer) (false)
73*5113495bSYour Name #endif
74*5113495bSYour Name
75*5113495bSYour Name #ifdef SDIO_3_0
76*5113495bSYour Name /**
77*5113495bSYour Name * set_extended_mbox_size() - set extended MBOX size
78*5113495bSYour Name * @pinfo: sdio mailbox info
79*5113495bSYour Name *
80*5113495bSYour Name * Return: none.
81*5113495bSYour Name */
static void set_extended_mbox_size(struct hif_device_mbox_info *pinfo)
{
	/* SDIO_3_0 build: mbox0 uses the wider ROME 2.0 extended window
	 * and mbox1 gets its own extended width as well.
	 */
	pinfo->mbox_prop[0].extended_size =
		HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0;
	pinfo->mbox_prop[1].extended_size =
		HIF_MBOX1_EXTENDED_WIDTH_AR6320;
}
89*5113495bSYour Name
90*5113495bSYour Name /**
91*5113495bSYour Name * set_extended_mbox_address() - set extended MBOX address
92*5113495bSYour Name * @pinfo: sdio mailbox info
93*5113495bSYour Name *
94*5113495bSYour Name * Return: none.
95*5113495bSYour Name */
static void set_extended_mbox_address(struct hif_device_mbox_info *pinfo)
{
	/* Place mbox1's extended window directly after mbox0's extended
	 * region, separated by the AR6320 dummy space.  Requires mbox0's
	 * extended address/size to be set before this is called.
	 */
	pinfo->mbox_prop[1].extended_address =
		pinfo->mbox_prop[0].extended_address +
		pinfo->mbox_prop[0].extended_size +
		HIF_MBOX_DUMMY_SPACE_SIZE_AR6320;
}
103*5113495bSYour Name #else
static void set_extended_mbox_size(struct hif_device_mbox_info *pinfo)
{
	/* Non-SDIO_3_0 build: only mbox0 has an extended window */
	pinfo->mbox_prop[0].extended_size =
		HIF_MBOX0_EXTENDED_WIDTH_AR6320;
}
109*5113495bSYour Name
static inline void
set_extended_mbox_address(struct hif_device_mbox_info *pinfo)
{
	/* Intentionally empty: mbox1 extended addressing is only
	 * configured when SDIO_3_0 is defined.
	 */
}
114*5113495bSYour Name #endif
115*5113495bSYour Name
116*5113495bSYour Name /**
117*5113495bSYour Name * set_extended_mbox_window_info() - set extended MBOX window
118*5113495bSYour Name * information for SDIO interconnects
119*5113495bSYour Name * @manf_id: manufacturer id
120*5113495bSYour Name * @pinfo: sdio mailbox info
121*5113495bSYour Name *
122*5113495bSYour Name * Return: none.
123*5113495bSYour Name */
set_extended_mbox_window_info(uint16_t manf_id,struct hif_device_mbox_info * pinfo)124*5113495bSYour Name static void set_extended_mbox_window_info(uint16_t manf_id,
125*5113495bSYour Name struct hif_device_mbox_info *pinfo)
126*5113495bSYour Name {
127*5113495bSYour Name switch (manf_id & MANUFACTURER_ID_AR6K_BASE_MASK) {
128*5113495bSYour Name case MANUFACTURER_ID_AR6002_BASE:
129*5113495bSYour Name /* MBOX 0 has an extended range */
130*5113495bSYour Name
131*5113495bSYour Name pinfo->mbox_prop[0].extended_address =
132*5113495bSYour Name HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1;
133*5113495bSYour Name pinfo->mbox_prop[0].extended_size =
134*5113495bSYour Name HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1;
135*5113495bSYour Name
136*5113495bSYour Name pinfo->mbox_prop[0].extended_address =
137*5113495bSYour Name HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1;
138*5113495bSYour Name pinfo->mbox_prop[0].extended_size =
139*5113495bSYour Name HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1;
140*5113495bSYour Name
141*5113495bSYour Name pinfo->mbox_prop[0].extended_address =
142*5113495bSYour Name HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004;
143*5113495bSYour Name pinfo->mbox_prop[0].extended_size =
144*5113495bSYour Name HIF_MBOX0_EXTENDED_WIDTH_AR6004;
145*5113495bSYour Name
146*5113495bSYour Name break;
147*5113495bSYour Name case MANUFACTURER_ID_AR6003_BASE:
148*5113495bSYour Name /* MBOX 0 has an extended range */
149*5113495bSYour Name pinfo->mbox_prop[0].extended_address =
150*5113495bSYour Name HIF_MBOX0_EXTENDED_BASE_ADDR_AR6003_V1;
151*5113495bSYour Name pinfo->mbox_prop[0].extended_size =
152*5113495bSYour Name HIF_MBOX0_EXTENDED_WIDTH_AR6003_V1;
153*5113495bSYour Name pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
154*5113495bSYour Name pinfo->gmbox_size = HIF_GMBOX_WIDTH;
155*5113495bSYour Name break;
156*5113495bSYour Name case MANUFACTURER_ID_AR6004_BASE:
157*5113495bSYour Name pinfo->mbox_prop[0].extended_address =
158*5113495bSYour Name HIF_MBOX0_EXTENDED_BASE_ADDR_AR6004;
159*5113495bSYour Name pinfo->mbox_prop[0].extended_size =
160*5113495bSYour Name HIF_MBOX0_EXTENDED_WIDTH_AR6004;
161*5113495bSYour Name pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
162*5113495bSYour Name pinfo->gmbox_size = HIF_GMBOX_WIDTH;
163*5113495bSYour Name break;
164*5113495bSYour Name case MANUFACTURER_ID_AR6320_BASE:
165*5113495bSYour Name {
166*5113495bSYour Name uint16_t rev = manf_id & MANUFACTURER_ID_AR6K_REV_MASK;
167*5113495bSYour Name
168*5113495bSYour Name pinfo->mbox_prop[0].extended_address =
169*5113495bSYour Name HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320;
170*5113495bSYour Name if (rev < 4)
171*5113495bSYour Name pinfo->mbox_prop[0].extended_size =
172*5113495bSYour Name HIF_MBOX0_EXTENDED_WIDTH_AR6320;
173*5113495bSYour Name else
174*5113495bSYour Name set_extended_mbox_size(pinfo);
175*5113495bSYour Name set_extended_mbox_address(pinfo);
176*5113495bSYour Name pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
177*5113495bSYour Name pinfo->gmbox_size = HIF_GMBOX_WIDTH;
178*5113495bSYour Name break;
179*5113495bSYour Name }
180*5113495bSYour Name case MANUFACTURER_ID_QCA9377_BASE:
181*5113495bSYour Name case MANUFACTURER_ID_QCA9379_BASE:
182*5113495bSYour Name pinfo->mbox_prop[0].extended_address =
183*5113495bSYour Name HIF_MBOX0_EXTENDED_BASE_ADDR_AR6320;
184*5113495bSYour Name pinfo->mbox_prop[0].extended_size =
185*5113495bSYour Name HIF_MBOX0_EXTENDED_WIDTH_AR6320_ROME_2_0;
186*5113495bSYour Name pinfo->mbox_prop[1].extended_address =
187*5113495bSYour Name pinfo->mbox_prop[0].extended_address +
188*5113495bSYour Name pinfo->mbox_prop[0].extended_size +
189*5113495bSYour Name HIF_MBOX_DUMMY_SPACE_SIZE_AR6320;
190*5113495bSYour Name pinfo->mbox_prop[1].extended_size =
191*5113495bSYour Name HIF_MBOX1_EXTENDED_WIDTH_AR6320;
192*5113495bSYour Name pinfo->gmbox_address = HIF_GMBOX_BASE_ADDR;
193*5113495bSYour Name pinfo->gmbox_size = HIF_GMBOX_WIDTH;
194*5113495bSYour Name break;
195*5113495bSYour Name default:
196*5113495bSYour Name A_ASSERT(false);
197*5113495bSYour Name break;
198*5113495bSYour Name }
199*5113495bSYour Name }
200*5113495bSYour Name
201*5113495bSYour Name /** hif_dev_set_mailbox_swap() - Set the mailbox swap from firmware
202*5113495bSYour Name * @pdev : The HIF layer object
203*5113495bSYour Name *
204*5113495bSYour Name * Return: none
205*5113495bSYour Name */
hif_dev_set_mailbox_swap(struct hif_sdio_dev * pdev)206*5113495bSYour Name void hif_dev_set_mailbox_swap(struct hif_sdio_dev *pdev)
207*5113495bSYour Name {
208*5113495bSYour Name struct hif_sdio_device *hif_device = hif_dev_from_hif(pdev);
209*5113495bSYour Name
210*5113495bSYour Name HIF_ENTER();
211*5113495bSYour Name
212*5113495bSYour Name hif_device->swap_mailbox = true;
213*5113495bSYour Name
214*5113495bSYour Name HIF_EXIT();
215*5113495bSYour Name }
216*5113495bSYour Name
217*5113495bSYour Name /** hif_dev_get_mailbox_swap() - Get the mailbox swap setting
218*5113495bSYour Name * @pdev : The HIF layer object
219*5113495bSYour Name *
220*5113495bSYour Name * Return: true or false
221*5113495bSYour Name */
hif_dev_get_mailbox_swap(struct hif_sdio_dev * pdev)222*5113495bSYour Name bool hif_dev_get_mailbox_swap(struct hif_sdio_dev *pdev)
223*5113495bSYour Name {
224*5113495bSYour Name struct hif_sdio_device *hif_device;
225*5113495bSYour Name
226*5113495bSYour Name HIF_ENTER();
227*5113495bSYour Name
228*5113495bSYour Name hif_device = hif_dev_from_hif(pdev);
229*5113495bSYour Name
230*5113495bSYour Name HIF_EXIT();
231*5113495bSYour Name
232*5113495bSYour Name return hif_device->swap_mailbox;
233*5113495bSYour Name }
234*5113495bSYour Name
235*5113495bSYour Name /**
236*5113495bSYour Name * hif_dev_get_fifo_address() - get the fifo addresses for dma
237*5113495bSYour Name * @pdev: SDIO HIF object
238*5113495bSYour Name * @config: mbox address config pointer
239*5113495bSYour Name * @config_len: config length
240*5113495bSYour Name *
241*5113495bSYour Name * Return : 0 for success, non-zero for error
242*5113495bSYour Name */
hif_dev_get_fifo_address(struct hif_sdio_dev * pdev,void * config,uint32_t config_len)243*5113495bSYour Name int hif_dev_get_fifo_address(struct hif_sdio_dev *pdev,
244*5113495bSYour Name void *config,
245*5113495bSYour Name uint32_t config_len)
246*5113495bSYour Name {
247*5113495bSYour Name uint32_t count;
248*5113495bSYour Name struct hif_device_mbox_info *cfg =
249*5113495bSYour Name (struct hif_device_mbox_info *)config;
250*5113495bSYour Name
251*5113495bSYour Name for (count = 0; count < 4; count++)
252*5113495bSYour Name cfg->mbox_addresses[count] = HIF_MBOX_START_ADDR(count);
253*5113495bSYour Name
254*5113495bSYour Name if (config_len >= sizeof(struct hif_device_mbox_info)) {
255*5113495bSYour Name set_extended_mbox_window_info((uint16_t)pdev->func->device,
256*5113495bSYour Name cfg);
257*5113495bSYour Name return 0;
258*5113495bSYour Name }
259*5113495bSYour Name
260*5113495bSYour Name return -EINVAL;
261*5113495bSYour Name }
262*5113495bSYour Name
263*5113495bSYour Name /**
264*5113495bSYour Name * hif_dev_get_block_size() - get the mbox block size for dma
265*5113495bSYour Name * @config : mbox size config pointer
266*5113495bSYour Name *
267*5113495bSYour Name * Return : NONE
268*5113495bSYour Name */
hif_dev_get_block_size(void * config)269*5113495bSYour Name void hif_dev_get_block_size(void *config)
270*5113495bSYour Name {
271*5113495bSYour Name ((uint32_t *)config)[0] = HIF_MBOX0_BLOCK_SIZE;
272*5113495bSYour Name ((uint32_t *)config)[1] = HIF_MBOX1_BLOCK_SIZE;
273*5113495bSYour Name ((uint32_t *)config)[2] = HIF_MBOX2_BLOCK_SIZE;
274*5113495bSYour Name ((uint32_t *)config)[3] = HIF_MBOX3_BLOCK_SIZE;
275*5113495bSYour Name }
276*5113495bSYour Name
277*5113495bSYour Name /**
278*5113495bSYour Name * hif_dev_map_service_to_pipe() - maps ul/dl pipe to service id.
279*5113495bSYour Name * @pdev: SDIO HIF object
280*5113495bSYour Name * @svc: service index
281*5113495bSYour Name * @ul_pipe: uplink pipe id
 * @dl_pipe: downlink pipe id
283*5113495bSYour Name *
284*5113495bSYour Name * Return: 0 on success, error value on invalid map
285*5113495bSYour Name */
hif_dev_map_service_to_pipe(struct hif_sdio_dev * pdev,uint16_t svc,uint8_t * ul_pipe,uint8_t * dl_pipe)286*5113495bSYour Name QDF_STATUS hif_dev_map_service_to_pipe(struct hif_sdio_dev *pdev, uint16_t svc,
287*5113495bSYour Name uint8_t *ul_pipe, uint8_t *dl_pipe)
288*5113495bSYour Name {
289*5113495bSYour Name QDF_STATUS status = QDF_STATUS_SUCCESS;
290*5113495bSYour Name
291*5113495bSYour Name switch (svc) {
292*5113495bSYour Name case HTT_DATA_MSG_SVC:
293*5113495bSYour Name if (hif_dev_get_mailbox_swap(pdev)) {
294*5113495bSYour Name *ul_pipe = 1;
295*5113495bSYour Name *dl_pipe = 0;
296*5113495bSYour Name } else {
297*5113495bSYour Name *ul_pipe = 3;
298*5113495bSYour Name *dl_pipe = 2;
299*5113495bSYour Name }
300*5113495bSYour Name break;
301*5113495bSYour Name
302*5113495bSYour Name case HTC_CTRL_RSVD_SVC:
303*5113495bSYour Name case HTC_RAW_STREAMS_SVC:
304*5113495bSYour Name *ul_pipe = 1;
305*5113495bSYour Name *dl_pipe = 0;
306*5113495bSYour Name break;
307*5113495bSYour Name
308*5113495bSYour Name case WMI_DATA_BE_SVC:
309*5113495bSYour Name case WMI_DATA_BK_SVC:
310*5113495bSYour Name case WMI_DATA_VI_SVC:
311*5113495bSYour Name case WMI_DATA_VO_SVC:
312*5113495bSYour Name *ul_pipe = 1;
313*5113495bSYour Name *dl_pipe = 0;
314*5113495bSYour Name break;
315*5113495bSYour Name
316*5113495bSYour Name case WMI_CONTROL_SVC:
317*5113495bSYour Name if (hif_dev_get_mailbox_swap(pdev)) {
318*5113495bSYour Name *ul_pipe = 3;
319*5113495bSYour Name *dl_pipe = 2;
320*5113495bSYour Name } else {
321*5113495bSYour Name *ul_pipe = 1;
322*5113495bSYour Name *dl_pipe = 0;
323*5113495bSYour Name }
324*5113495bSYour Name break;
325*5113495bSYour Name
326*5113495bSYour Name default:
327*5113495bSYour Name hif_err("Invalid service: %d", svc);
328*5113495bSYour Name status = QDF_STATUS_E_INVAL;
329*5113495bSYour Name break;
330*5113495bSYour Name }
331*5113495bSYour Name return status;
332*5113495bSYour Name }
333*5113495bSYour Name
334*5113495bSYour Name /** hif_dev_setup_device() - Setup device specific stuff here required for hif
335*5113495bSYour Name * @pdev : HIF layer object
336*5113495bSYour Name *
337*5113495bSYour Name * return 0 on success, error otherwise
338*5113495bSYour Name */
hif_dev_setup_device(struct hif_sdio_device * pdev)339*5113495bSYour Name int hif_dev_setup_device(struct hif_sdio_device *pdev)
340*5113495bSYour Name {
341*5113495bSYour Name int status = 0;
342*5113495bSYour Name uint32_t blocksizes[MAILBOX_COUNT];
343*5113495bSYour Name
344*5113495bSYour Name status = hif_configure_device(NULL, pdev->HIFDevice,
345*5113495bSYour Name HIF_DEVICE_GET_FIFO_ADDR,
346*5113495bSYour Name &pdev->MailBoxInfo,
347*5113495bSYour Name sizeof(pdev->MailBoxInfo));
348*5113495bSYour Name
349*5113495bSYour Name if (status != QDF_STATUS_SUCCESS)
350*5113495bSYour Name hif_err("HIF_DEVICE_GET_MBOX_ADDR failed");
351*5113495bSYour Name
352*5113495bSYour Name status = hif_configure_device(NULL, pdev->HIFDevice,
353*5113495bSYour Name HIF_DEVICE_GET_BLOCK_SIZE,
354*5113495bSYour Name blocksizes, sizeof(blocksizes));
355*5113495bSYour Name if (status != QDF_STATUS_SUCCESS)
356*5113495bSYour Name hif_err("HIF_DEVICE_GET_MBOX_BLOCK_SIZE fail");
357*5113495bSYour Name
358*5113495bSYour Name pdev->BlockSize = blocksizes[MAILBOX_FOR_BLOCK_SIZE];
359*5113495bSYour Name
360*5113495bSYour Name return status;
361*5113495bSYour Name }
362*5113495bSYour Name
/** hif_dev_mask_interrupts() - Disable the interrupts in the device
 * @pdev: SDIO HIF Object
 *
 * Return: NONE
 */
hif_dev_mask_interrupts(struct hif_sdio_device * pdev)368*5113495bSYour Name void hif_dev_mask_interrupts(struct hif_sdio_device *pdev)
369*5113495bSYour Name {
370*5113495bSYour Name int status = QDF_STATUS_SUCCESS;
371*5113495bSYour Name
372*5113495bSYour Name HIF_ENTER();
373*5113495bSYour Name /* Disable all interrupts */
374*5113495bSYour Name LOCK_HIF_DEV(pdev);
375*5113495bSYour Name mboxEnaRegs(pdev).int_status_enable = 0;
376*5113495bSYour Name mboxEnaRegs(pdev).cpu_int_status_enable = 0;
377*5113495bSYour Name mboxEnaRegs(pdev).error_status_enable = 0;
378*5113495bSYour Name mboxEnaRegs(pdev).counter_int_status_enable = 0;
379*5113495bSYour Name UNLOCK_HIF_DEV(pdev);
380*5113495bSYour Name
381*5113495bSYour Name /* always synchronous */
382*5113495bSYour Name status = hif_read_write(pdev->HIFDevice,
383*5113495bSYour Name INT_STATUS_ENABLE_ADDRESS,
384*5113495bSYour Name (char *)&mboxEnaRegs(pdev),
385*5113495bSYour Name sizeof(struct MBOX_IRQ_ENABLE_REGISTERS),
386*5113495bSYour Name HIF_WR_SYNC_BYTE_INC, NULL);
387*5113495bSYour Name
388*5113495bSYour Name if (status != QDF_STATUS_SUCCESS)
389*5113495bSYour Name hif_err("Updating intr reg: %d", status);
390*5113495bSYour Name }
391*5113495bSYour Name
/** hif_dev_unmask_interrupts() - Enable the interrupts in the device
 * @pdev: SDIO HIF Object
 *
 * Return: NONE
 */
void hif_dev_unmask_interrupts(struct hif_sdio_device *pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	/* Build the full enable-register image in the shadow copy under
	 * the device lock, then write it to the target in one pass.
	 */
	LOCK_HIF_DEV(pdev);

	/* Enable all the interrupts except for the internal
	 * AR6000 CPU interrupt
	 */
	mboxEnaRegs(pdev).int_status_enable =
		INT_STATUS_ENABLE_ERROR_SET(0x01) |
		INT_STATUS_ENABLE_CPU_SET(0x01)
		| INT_STATUS_ENABLE_COUNTER_SET(0x01);

	/* enable 2 mboxs INT */
	mboxEnaRegs(pdev).int_status_enable |=
		INT_STATUS_ENABLE_MBOX_DATA_SET(0x01) |
		INT_STATUS_ENABLE_MBOX_DATA_SET(0x02);

	/* Set up the CPU Interrupt Status Register, enable
	 * CPU sourced interrupt #0, #1.
	 * #0 is used for report assertion from target
	 * #1 is used for inform host that credit arrived
	 */
	mboxEnaRegs(pdev).cpu_int_status_enable = 0x03;

	/* Set up the Error Interrupt Status Register.
	 * NOTE(review): the >> 16 presumably moves the field macros'
	 * register-offset values down into this byte-wide enable field —
	 * confirm against the register definitions.
	 */
	mboxEnaRegs(pdev).error_status_enable =
		(ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(0x01)
		 | ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(0x01)) >> 16;

	/* Set up the Counter Interrupt Status Register
	 * (only for debug interrupt to catch fatal errors)
	 */
	mboxEnaRegs(pdev).counter_int_status_enable =
		(COUNTER_INT_STATUS_ENABLE_BIT_SET(AR6K_TARGET_DEBUG_INTR_MASK)) >> 24;

	UNLOCK_HIF_DEV(pdev);

	/* always synchronous */
	status = hif_read_write(pdev->HIFDevice,
				INT_STATUS_ENABLE_ADDRESS,
				(char *)&mboxEnaRegs(pdev),
				sizeof(struct MBOX_IRQ_ENABLE_REGISTERS),
				HIF_WR_SYNC_BYTE_INC,
				NULL);

	if (status != QDF_STATUS_SUCCESS)
		hif_err("Updating intr reg: %d", status);
}
447*5113495bSYour Name
hif_dev_dump_registers(struct hif_sdio_device * pdev,struct MBOX_IRQ_PROC_REGISTERS * irq_proc,struct MBOX_IRQ_ENABLE_REGISTERS * irq_en,struct MBOX_COUNTER_REGISTERS * mbox_regs)448*5113495bSYour Name void hif_dev_dump_registers(struct hif_sdio_device *pdev,
449*5113495bSYour Name struct MBOX_IRQ_PROC_REGISTERS *irq_proc,
450*5113495bSYour Name struct MBOX_IRQ_ENABLE_REGISTERS *irq_en,
451*5113495bSYour Name struct MBOX_COUNTER_REGISTERS *mbox_regs)
452*5113495bSYour Name {
453*5113495bSYour Name int i = 0;
454*5113495bSYour Name
455*5113495bSYour Name hif_debug("Mailbox registers:");
456*5113495bSYour Name
457*5113495bSYour Name if (irq_proc) {
458*5113495bSYour Name hif_debug("HostIntStatus: 0x%x ", irq_proc->host_int_status);
459*5113495bSYour Name hif_debug("CPUIntStatus: 0x%x ", irq_proc->cpu_int_status);
460*5113495bSYour Name hif_debug("ErrorIntStatus: 0x%x ", irq_proc->error_int_status);
461*5113495bSYour Name hif_debug("CounterIntStat: 0x%x ",
462*5113495bSYour Name irq_proc->counter_int_status);
463*5113495bSYour Name hif_debug("MboxFrame: 0x%x ", irq_proc->mbox_frame);
464*5113495bSYour Name hif_debug("RxLKAValid: 0x%x ", irq_proc->rx_lookahead_valid);
465*5113495bSYour Name hif_debug("RxLKA0: 0x%x", irq_proc->rx_lookahead[0]);
466*5113495bSYour Name hif_debug("RxLKA1: 0x%x ", irq_proc->rx_lookahead[1]);
467*5113495bSYour Name hif_debug("RxLKA2: 0x%x ", irq_proc->rx_lookahead[2]);
468*5113495bSYour Name hif_debug("RxLKA3: 0x%x", irq_proc->rx_lookahead[3]);
469*5113495bSYour Name
470*5113495bSYour Name if (pdev->MailBoxInfo.gmbox_address != 0) {
471*5113495bSYour Name hif_debug("GMBOX-HostIntStatus2: 0x%x ",
472*5113495bSYour Name irq_proc->host_int_status2);
473*5113495bSYour Name hif_debug("GMBOX-RX-Avail: 0x%x ",
474*5113495bSYour Name irq_proc->gmbox_rx_avail);
475*5113495bSYour Name }
476*5113495bSYour Name }
477*5113495bSYour Name
478*5113495bSYour Name if (irq_en) {
479*5113495bSYour Name hif_debug("IntStatusEnable: 0x%x",
480*5113495bSYour Name irq_en->int_status_enable);
481*5113495bSYour Name hif_debug("CounterIntStatus: 0x%x",
482*5113495bSYour Name irq_en->counter_int_status_enable);
483*5113495bSYour Name }
484*5113495bSYour Name
485*5113495bSYour Name for (i = 0; mbox_regs && i < 4; i++)
486*5113495bSYour Name hif_debug("Counter[%d]: 0x%x", i, mbox_regs->counter[i]);
487*5113495bSYour Name }
488*5113495bSYour Name
489*5113495bSYour Name /* under HL SDIO, with Interface Memory support, we have
490*5113495bSYour Name * the following reasons to support 2 mboxs:
491*5113495bSYour Name * a) we need place different buffers in different
492*5113495bSYour Name * mempool, for example, data using Interface Memory,
493*5113495bSYour Name * desc and other using DRAM, they need different SDIO
494*5113495bSYour Name * mbox channels.
495*5113495bSYour Name * b) currently, tx mempool in LL case is separated from
496*5113495bSYour Name * main mempool, the structure (descs at the beginning
497*5113495bSYour Name * of every pool buffer) is different, because they only
498*5113495bSYour Name * need store tx desc from host. To align with LL case,
499*5113495bSYour Name * we also need 2 mbox support just as PCIe LL cases.
500*5113495bSYour Name */
501*5113495bSYour Name
502*5113495bSYour Name /**
503*5113495bSYour Name * hif_dev_map_pipe_to_mail_box() - maps pipe id to mailbox.
504*5113495bSYour Name * @pdev: The pointer to the hif device object
505*5113495bSYour Name * @pipeid: pipe index
506*5113495bSYour Name *
507*5113495bSYour Name * Return: mailbox index
508*5113495bSYour Name */
hif_dev_map_pipe_to_mail_box(struct hif_sdio_device * pdev,uint8_t pipeid)509*5113495bSYour Name static uint8_t hif_dev_map_pipe_to_mail_box(struct hif_sdio_device *pdev,
510*5113495bSYour Name uint8_t pipeid)
511*5113495bSYour Name {
512*5113495bSYour Name if (2 == pipeid || 3 == pipeid)
513*5113495bSYour Name return 1;
514*5113495bSYour Name else if (0 == pipeid || 1 == pipeid)
515*5113495bSYour Name return 0;
516*5113495bSYour Name
517*5113495bSYour Name hif_err("pipeid=%d invalid", pipeid);
518*5113495bSYour Name
519*5113495bSYour Name qdf_assert(0);
520*5113495bSYour Name
521*5113495bSYour Name return INVALID_MAILBOX_NUMBER;
522*5113495bSYour Name }
523*5113495bSYour Name
524*5113495bSYour Name /**
525*5113495bSYour Name * hif_dev_map_mail_box_to_pipe() - map sdio mailbox to htc pipe.
526*5113495bSYour Name * @pdev: The pointer to the hif device object
527*5113495bSYour Name * @mbox_index: mailbox index
528*5113495bSYour Name * @upload: boolean to decide mailbox index
529*5113495bSYour Name *
530*5113495bSYour Name * Return: Invalid pipe index
531*5113495bSYour Name */
hif_dev_map_mail_box_to_pipe(struct hif_sdio_device * pdev,uint8_t mbox_index,bool upload)532*5113495bSYour Name static uint8_t hif_dev_map_mail_box_to_pipe(struct hif_sdio_device *pdev,
533*5113495bSYour Name uint8_t mbox_index, bool upload)
534*5113495bSYour Name {
535*5113495bSYour Name if (mbox_index == 0)
536*5113495bSYour Name return upload ? 1 : 0;
537*5113495bSYour Name else if (mbox_index == 1)
538*5113495bSYour Name return upload ? 3 : 2;
539*5113495bSYour Name
540*5113495bSYour Name hif_err("mbox_index=%d, upload=%d invalid", mbox_index, upload);
541*5113495bSYour Name
542*5113495bSYour Name qdf_assert(0);
543*5113495bSYour Name
544*5113495bSYour Name return INVALID_MAILBOX_NUMBER; /* invalid pipe id */
545*5113495bSYour Name }
546*5113495bSYour Name
547*5113495bSYour Name /**
548*5113495bSYour Name * hif_get_send_address() - Get the transfer pipe address
549*5113495bSYour Name * @pdev: The pointer to the hif device object
550*5113495bSYour Name * @pipe: The pipe identifier
551*5113495bSYour Name * @addr:
552*5113495bSYour Name *
553*5113495bSYour Name * Return 0 for success and non-zero for failure to map
554*5113495bSYour Name */
hif_get_send_address(struct hif_sdio_device * pdev,uint8_t pipe,unsigned long * addr)555*5113495bSYour Name int hif_get_send_address(struct hif_sdio_device *pdev,
556*5113495bSYour Name uint8_t pipe, unsigned long *addr)
557*5113495bSYour Name {
558*5113495bSYour Name uint8_t mbox_index = INVALID_MAILBOX_NUMBER;
559*5113495bSYour Name
560*5113495bSYour Name if (!addr)
561*5113495bSYour Name return -EINVAL;
562*5113495bSYour Name
563*5113495bSYour Name mbox_index = hif_dev_map_pipe_to_mail_box(pdev, pipe);
564*5113495bSYour Name
565*5113495bSYour Name if (mbox_index == INVALID_MAILBOX_NUMBER)
566*5113495bSYour Name return -EINVAL;
567*5113495bSYour Name
568*5113495bSYour Name *addr = pdev->MailBoxInfo.mbox_prop[mbox_index].extended_address;
569*5113495bSYour Name
570*5113495bSYour Name return 0;
571*5113495bSYour Name }
572*5113495bSYour Name
573*5113495bSYour Name /**
574*5113495bSYour Name * hif_fixup_write_param() - Tweak the address and length parameters
575*5113495bSYour Name * @pdev: The pointer to the hif device object
576*5113495bSYour Name * @req:
577*5113495bSYour Name * @length: The length pointer
578*5113495bSYour Name * @addr: The addr pointer
579*5113495bSYour Name *
580*5113495bSYour Name * Return: None
581*5113495bSYour Name */
void hif_fixup_write_param(struct hif_sdio_dev *pdev, uint32_t req,
			   uint32_t *length, uint32_t *addr)
{
	struct hif_device_mbox_info mboxinfo;
	uint32_t taddr = *addr, mboxlen = 0;

	/* Look up the device's mailbox window layout to classify *addr */
	hif_configure_device(NULL, pdev, HIF_DEVICE_GET_FIFO_ADDR,
			     &mboxinfo, sizeof(mboxinfo));

	if (taddr >= 0x800 && taddr < 0xC00) {
		/* Host control register and CIS Window */
		mboxlen = 0;
	} else if (taddr == mboxinfo.mbox_addresses[0] ||
		   taddr == mboxinfo.mbox_addresses[1] ||
		   taddr == mboxinfo.mbox_addresses[2] ||
		   taddr == mboxinfo.mbox_addresses[3]) {
		/* Base mailbox address: fixed-width window */
		mboxlen = HIF_MBOX_WIDTH;
	} else if (taddr == mboxinfo.mbox_prop[0].extended_address) {
		mboxlen = mboxinfo.mbox_prop[0].extended_size;
	} else if (taddr == mboxinfo.mbox_prop[1].extended_address) {
		mboxlen = mboxinfo.mbox_prop[1].extended_size;
	} else {
		/* Not a recognized mailbox/control address: leave *addr
		 * untouched and bail out.
		 */
		hif_err("Invalid write addr: 0x%08x", taddr);
		return;
	}

	if (mboxlen != 0) {
		if (*length > mboxlen) {
			/* Transfer can't fit in the window; do nothing */
			hif_err("Error (%u > %u)", *length, mboxlen);
			return;
		}

		/* Shift the start so the transfer's last byte lands at the
		 * end of the mailbox window, then add the dummy-space
		 * offset encoded in bits 16+ of the request flags.
		 */
		taddr = taddr + (mboxlen - *length);
		taddr = taddr + ((req & HIF_DUMMY_SPACE_MASK) >> 16);
		*addr = taddr;
	}
}
619*5113495bSYour Name
/**
 * hif_dev_recv_packet() - Receive HTC packet/packet information from device
 * @pdev: HIF device object
 * @packet: The HTC packet pointer
 * @recv_length: The length of information to be received
 * @mbox_index: The mailbox that contains this information
 *
 * Issues either a synchronous or an asynchronous mailbox read depending on
 * whether the packet carries a completion callback.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_PENDING when the read
 * was queued asynchronously, otherwise a QDF error code
 */
static QDF_STATUS hif_dev_recv_packet(struct hif_sdio_device *pdev,
				      HTC_PACKET *packet,
				      uint32_t recv_length,
				      uint32_t mbox_index)
{
	QDF_STATUS status;
	uint32_t padded_length;
	/* no completion callback means the caller expects a blocking read */
	bool sync = (packet->Completion) ? false : true;
	uint32_t req = sync ? HIF_RD_SYNC_BLOCK_FIX : HIF_RD_ASYNC_BLOCK_FIX;

	/* adjust the length to be a multiple of block size if appropriate */
	padded_length = DEV_CALC_RECV_PADDED_LEN(pdev, recv_length);

	if (padded_length > packet->BufferLength) {
		hif_err("No space for padlen:%d recvlen:%d bufferlen:%d",
			padded_length,
			recv_length, packet->BufferLength);
		/* async path: report the error through the completion and
		 * tell the caller the packet was consumed
		 */
		if (packet->Completion) {
			COMPLETE_HTC_PACKET(packet, QDF_STATUS_E_INVAL);
			return QDF_STATUS_SUCCESS;
		}
		return QDF_STATUS_E_INVAL;
	}

	/* mailbox index is saved in Endpoint member */
	hif_debug("hdr:0x%x, len:%d, padded length: %d Mbox:0x%x",
		  packet->PktInfo.AsRx.ExpectedHdr, recv_length,
		  padded_length, mbox_index);

	status = hif_read_write(pdev->HIFDevice,
				pdev->MailBoxInfo.mbox_addresses[mbox_index],
				packet->pBuffer,
				padded_length,
				req, sync ? NULL : packet);

	if (status != QDF_STATUS_SUCCESS && status != QDF_STATUS_E_PENDING)
		hif_err("Failed %d", status);

	if (sync) {
		/* synchronous read: record the status and trace the HTC
		 * header that is now at the head of the buffer
		 */
		packet->Status = status;
		if (status == QDF_STATUS_SUCCESS) {
			HTC_FRAME_HDR *hdr = (HTC_FRAME_HDR *) packet->pBuffer;

			hif_debug("EP:%d,Len:%d,Flg:%d,CB:0x%02X,0x%02X",
				  hdr->EndpointID, hdr->PayloadLen,
				  hdr->Flags, hdr->ControlBytes0,
				  hdr->ControlBytes1);
		}
	}

	return status;
}
681*5113495bSYour Name
/**
 * hif_dev_issue_recv_packet_bundle() - fetch several RX packets in one
 *	bundled mailbox transfer
 * @pdev: HIF SDIO device context
 * @recv_pkt_queue: queue of prepared, empty HTC packets to be filled
 * @sync_completion_queue: destination queue for the filled packets in the
 *	synchronous path; NULL when the caller does not want them queued
 * @mail_box_index: mailbox the bundle is read from
 * @num_packets_fetched: out - number of packets covered by this transfer
 * @partial_bundle: true when the caller already knows the bundle is broken,
 *	which forces the packet lookaheads to be ignored
 *
 * Return: QDF_STATUS_SUCCESS for success, otherwise a QDF error code
 */
static QDF_STATUS hif_dev_issue_recv_packet_bundle
			(
			 struct hif_sdio_device *pdev,
			 HTC_PACKET_QUEUE *recv_pkt_queue,
			 HTC_PACKET_QUEUE *sync_completion_queue,
			 uint8_t mail_box_index,
			 int *num_packets_fetched,
			 bool partial_bundle
			)
{
	uint32_t padded_length;
	int i, total_length = 0;
	HTC_TARGET *target = NULL;
	int bundleSpaceRemaining = 0;
	unsigned char *bundle_buffer = NULL;
	HTC_PACKET *packet, *packet_rx_bundle;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	target = (HTC_TARGET *)pdev->pTarget;

	/* more packets are queued than one bundle transfer can carry */
	if ((HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue) -
	     HTC_MAX_MSG_PER_BUNDLE_RX) > 0) {
		partial_bundle = true;
		hif_warn("partial bundle detected num: %d, %d",
			 HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue),
			 HTC_MAX_MSG_PER_BUNDLE_RX);
	}

	/* bundle capacity in bytes, bounded by the target credit size */
	bundleSpaceRemaining =
		HTC_MAX_MSG_PER_BUNDLE_RX * target->TargetCreditSize;
	packet_rx_bundle = allocate_htc_bundle_packet(target);
	if (!packet_rx_bundle) {
		hif_err("packet_rx_bundle is NULL");
		qdf_sleep(NBUF_ALLOC_FAIL_WAIT_TIME); /* 100 msec sleep */
		return QDF_STATUS_E_NOMEM;
	}
	bundle_buffer = packet_rx_bundle->pBuffer;

	/* pull packets into the bundle until it is full or the queue
	 * empties; each packet accounts for its block-padded length
	 */
	for (i = 0;
	     !HTC_QUEUE_EMPTY(recv_pkt_queue) && i < HTC_MAX_MSG_PER_BUNDLE_RX;
	     i++) {
		packet = htc_packet_dequeue(recv_pkt_queue);
		A_ASSERT(packet);
		if (!packet)
			break;
		padded_length =
			DEV_CALC_RECV_PADDED_LEN(pdev, packet->ActualLength);
		if (packet->PktInfo.AsRx.HTCRxFlags &
		    HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK)
			padded_length += HIF_BLOCK_SIZE;
		if ((bundleSpaceRemaining - padded_length) < 0) {
			/* exceeds what we can transfer, put the packet back */
			HTC_PACKET_ENQUEUE_TO_HEAD(recv_pkt_queue, packet);
			break;
		}
		bundleSpaceRemaining -= padded_length;

		if (partial_bundle ||
		    HTC_PACKET_QUEUE_DEPTH(recv_pkt_queue) > 0) {
			/* lookaheads in all packets except the last one of a
			 * complete bundle are stale and must be ignored
			 */
			packet->PktInfo.AsRx.HTCRxFlags |=
				HTC_RX_PKT_IGNORE_LOOKAHEAD;
		}
		packet->PktInfo.AsRx.HTCRxFlags |= HTC_RX_PKT_PART_OF_BUNDLE;

		if (sync_completion_queue)
			HTC_PACKET_ENQUEUE(sync_completion_queue, packet);

		total_length += padded_length;
	}
#if DEBUG_BUNDLE
	qdf_print("Recv bundle count %d, length %d.",
		  sync_completion_queue ?
		  HTC_PACKET_QUEUE_DEPTH(sync_completion_queue) : 0,
		  total_length);
#endif

	/* one synchronous mailbox read covers the whole bundle */
	status = hif_read_write(pdev->HIFDevice,
				pdev->MailBoxInfo.
				mbox_addresses[(int)mail_box_index],
				bundle_buffer, total_length,
				HIF_RD_SYNC_BLOCK_FIX, NULL);

	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hif_send Failed status:%d", status);
	} else {
		unsigned char *buffer = bundle_buffer;
		*num_packets_fetched = i;
		if (sync_completion_queue) {
			/* scatter the bundle buffer back into the individual
			 * packet buffers, advancing by each padded length
			 */
			HTC_PACKET_QUEUE_ITERATE_ALLOW_REMOVE(
				sync_completion_queue, packet) {
				padded_length =
					DEV_CALC_RECV_PADDED_LEN(pdev,
								 packet->ActualLength);
				if (packet->PktInfo.AsRx.HTCRxFlags &
				    HTC_RX_PKT_LAST_BUNDLED_PKT_HAS_ADDTIONAL_BLOCK)
					padded_length +=
						HIF_BLOCK_SIZE;
				A_MEMCPY(packet->pBuffer,
					 buffer, padded_length);
				buffer += padded_length;
			} HTC_PACKET_QUEUE_ITERATE_END;
		}
	}
	/* free bundle space under Sync mode */
	free_htc_bundle_packet(target, packet_rx_bundle);
	return status;
}
789*5113495bSYour Name
#define ISSUE_BUNDLE hif_dev_issue_recv_packet_bundle
/**
 * hif_dev_recv_message_pending_handler() - drain pending HTC messages
 *	from a mailbox
 * @pdev: HIF SDIO device context
 * @mail_box_index: mailbox to drain
 * @msg_look_aheads: lookahead words reported for this mailbox
 * @num_look_aheads: number of valid entries in @msg_look_aheads
 * @async_proc: out - set true when packets are fetched asynchronously
 *	(may be NULL)
 * @num_pkts_fetched: out - total number of packets fetched (may be NULL)
 *
 * Repeatedly allocates RX packets from the lookaheads, fetches them
 * (bundled where possible) and, in the synchronous path, completes them
 * to the registered rx completion handler until no lookahead remains.
 *
 * Return: QDF_STATUS_SUCCESS for success, otherwise a QDF error code
 */
static
QDF_STATUS hif_dev_recv_message_pending_handler(struct hif_sdio_device *pdev,
						uint8_t mail_box_index,
						uint32_t msg_look_aheads[],
						int num_look_aheads,
						bool *async_proc,
						int *num_pkts_fetched)
{
	int pkts_fetched;
	HTC_PACKET *pkt;
	HTC_ENDPOINT_ID id;
	bool partial_bundle;
	int total_fetched = 0;
	bool asyncProc = false;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	uint32_t look_aheads[HTC_MAX_MSG_PER_BUNDLE_RX];
	HTC_PACKET_QUEUE recv_q, sync_comp_q;
	QDF_STATUS (*rxCompletion)(void *, qdf_nbuf_t, uint8_t);

	hif_debug("NumLookAheads: %d", num_look_aheads);

	if (num_pkts_fetched)
		*num_pkts_fetched = 0;

	if (IS_DEV_IRQ_PROCESSING_ASYNC_ALLOWED(pdev)) {
		/* We use async mode to get the packets if the
		 * device layer supports it. The device layer
		 * interfaces with HIF in which HIF may have
		 * restrictions on how interrupts are processed
		 */
		asyncProc = true;
	}

	if (async_proc) {
		/* indicate to caller how we decided to process this */
		*async_proc = asyncProc;
	}

	if (num_look_aheads > HTC_MAX_MSG_PER_BUNDLE_RX) {
		A_ASSERT(false);
		return QDF_STATUS_E_PROTO;
	}

	/* work on a local copy; the loop refreshes it from the headers of
	 * the packets it processes
	 */
	A_MEMCPY(look_aheads, msg_look_aheads,
		 (sizeof(uint32_t)) * num_look_aheads);
	while (true) {
		/* reset packets queues */
		INIT_HTC_PACKET_QUEUE(&recv_q);
		INIT_HTC_PACKET_QUEUE(&sync_comp_q);
		if (num_look_aheads > HTC_MAX_MSG_PER_BUNDLE_RX) {
			status = QDF_STATUS_E_PROTO;
			A_ASSERT(false);
			break;
		}

		/* first lookahead sets the expected endpoint IDs for
		 * all packets in a bundle
		 */
		id = ((HTC_FRAME_HDR *)&look_aheads[0])->EndpointID;

		if (id >= ENDPOINT_MAX) {
			hif_err("Invalid Endpoint in lookahead: %d", id);
			status = QDF_STATUS_E_PROTO;
			break;
		}
		/* try to allocate as many HTC RX packets indicated
		 * by the lookaheads these packets are stored
		 * in the recvPkt queue
		 */
		status = hif_dev_alloc_and_prepare_rx_packets(pdev,
							      look_aheads,
							      num_look_aheads,
							      &recv_q);
		if (QDF_IS_STATUS_ERROR(status))
			break;
		total_fetched += HTC_PACKET_QUEUE_DEPTH(&recv_q);

		/* we've got packet buffers for all we can currently fetch,
		 * this count is not valid anymore
		 */
		num_look_aheads = 0;
		partial_bundle = false;

		/* now go fetch the list of HTC packets */
		while (!HTC_QUEUE_EMPTY(&recv_q)) {
			pkts_fetched = 0;
			if ((HTC_PACKET_QUEUE_DEPTH(&recv_q) > 1)) {
				/* there are enough packets to attempt a bundle
				 * transfer and recv bundling is allowed
				 */
				status = ISSUE_BUNDLE(pdev,
						      &recv_q,
						      asyncProc ? NULL :
						      &sync_comp_q,
						      mail_box_index,
						      &pkts_fetched,
						      partial_bundle);
				if (QDF_IS_STATUS_ERROR(status)) {
					hif_dev_free_recv_pkt_queue(
							&recv_q);
					break;
				}

				if (HTC_PACKET_QUEUE_DEPTH(&recv_q) !=
					0) {
					/* we couldn't fetch all packets at one,
					 * time this creates a broken
					 * bundle
					 */
					partial_bundle = true;
				}
			}

			/* see if the previous operation fetched any
			 * packets using bundling
			 */
			if (pkts_fetched == 0) {
				/* dequeue one packet */
				pkt = htc_packet_dequeue(&recv_q);
				A_ASSERT(pkt);
				if (!pkt)
					break;

				/* force the synchronous path for this
				 * single packet
				 */
				pkt->Completion = NULL;

				if (HTC_PACKET_QUEUE_DEPTH(&recv_q) >
					0) {
					/* lookaheads in all packets except the
					 * last one in must be ignored
					 */
					pkt->PktInfo.AsRx.HTCRxFlags |=
						HTC_RX_PKT_IGNORE_LOOKAHEAD;
				}

				/* go fetch the packet */
				status =
				hif_dev_recv_packet(pdev, pkt,
						    pkt->ActualLength,
						    mail_box_index);
				/* on failure, drop the remaining queued
				 * packets and their network buffers
				 */
				while (QDF_IS_STATUS_ERROR(status) &&
				       !HTC_QUEUE_EMPTY(&recv_q)) {
					qdf_nbuf_t nbuf;

					pkt = htc_packet_dequeue(&recv_q);
					if (!pkt)
						break;
					nbuf = pkt->pNetBufContext;
					if (nbuf)
						qdf_nbuf_free(nbuf);
				}

				if (QDF_IS_STATUS_ERROR(status))
					break;
				/* sent synchronously, queue this packet for
				 * synchronous completion
				 */
				HTC_PACKET_ENQUEUE(&sync_comp_q, pkt);
			}
		}

		/* synchronous handling */
		if (pdev->DSRCanYield) {
			/* for the SYNC case, increment count that tracks
			 * when the DSR should yield
			 */
			pdev->CurrentDSRRecvCount++;
		}

		/* in the sync case, all packet buffers are now filled,
		 * we can process each packet, check lookahead , then repeat
		 */
		rxCompletion = pdev->hif_callbacks.rxCompletionHandler;

		/* unload sync completion queue */
		while (!HTC_QUEUE_EMPTY(&sync_comp_q)) {
			uint8_t pipeid;
			qdf_nbuf_t netbuf;

			pkt = htc_packet_dequeue(&sync_comp_q);
			A_ASSERT(pkt);
			if (!pkt)
				break;

			/* the header of each completed packet may refresh
			 * the lookaheads for the next outer-loop pass
			 */
			num_look_aheads = 0;
			status = hif_dev_process_recv_header(pdev, pkt,
							     look_aheads,
							     &num_look_aheads);
			if (QDF_IS_STATUS_ERROR(status)) {
				/* put it back so the error path below can
				 * free the whole queue
				 */
				HTC_PACKET_ENQUEUE_TO_HEAD(&sync_comp_q, pkt);
				break;
			}

			netbuf = (qdf_nbuf_t)pkt->pNetBufContext;
			/* set data length */
			qdf_nbuf_put_tail(netbuf, pkt->ActualLength);

			if (rxCompletion) {
				pipeid =
				hif_dev_map_mail_box_to_pipe(pdev,
							     mail_box_index,
							     true);
				rxCompletion(pdev->hif_callbacks.Context,
					     netbuf, pipeid);
			}
		}

		if (QDF_IS_STATUS_ERROR(status)) {
			if (!HTC_QUEUE_EMPTY(&sync_comp_q))
				hif_dev_free_recv_pkt_queue(
						&sync_comp_q);
			break;
		}

		if (num_look_aheads == 0) {
			/* no more look aheads */
			break;
		}
		/* check whether other OS contexts have queued any WMI
		 * command/data for WLAN. This check is needed only if WLAN
		 * Tx and Rx happens in same thread context
		 */
		/* A_CHECK_DRV_TX(); */
	}
	if (num_pkts_fetched)
		*num_pkts_fetched = total_fetched;

	AR_DEBUG_PRINTF(ATH_DEBUG_RECV, ("-HTCRecvMessagePendingHandler\n"));
	return status;
}
1020*5113495bSYour Name
1021*5113495bSYour Name /**
1022*5113495bSYour Name * hif_dev_service_cpu_interrupt() - service fatal interrupts
1023*5113495bSYour Name * synchronously
1024*5113495bSYour Name *
1025*5113495bSYour Name * @pdev: hif sdio device context
1026*5113495bSYour Name *
1027*5113495bSYour Name * Return: QDF_STATUS_SUCCESS for success
1028*5113495bSYour Name */
hif_dev_service_cpu_interrupt(struct hif_sdio_device * pdev)1029*5113495bSYour Name static QDF_STATUS hif_dev_service_cpu_interrupt(struct hif_sdio_device *pdev)
1030*5113495bSYour Name {
1031*5113495bSYour Name QDF_STATUS status;
1032*5113495bSYour Name uint8_t reg_buffer[4];
1033*5113495bSYour Name uint8_t cpu_int_status;
1034*5113495bSYour Name
1035*5113495bSYour Name cpu_int_status = mboxProcRegs(pdev).cpu_int_status &
1036*5113495bSYour Name mboxEnaRegs(pdev).cpu_int_status_enable;
1037*5113495bSYour Name
1038*5113495bSYour Name hif_err("CPU intr status: 0x%x", (uint32_t)cpu_int_status);
1039*5113495bSYour Name
1040*5113495bSYour Name /* Clear the interrupt */
1041*5113495bSYour Name mboxProcRegs(pdev).cpu_int_status &= ~cpu_int_status;
1042*5113495bSYour Name
1043*5113495bSYour Name /*set up the register transfer buffer to hit the register
1044*5113495bSYour Name * 4 times , this is done to make the access 4-byte aligned
1045*5113495bSYour Name * to mitigate issues with host bus interconnects that
1046*5113495bSYour Name * restrict bus transfer lengths to be a multiple of 4-bytes
1047*5113495bSYour Name * set W1C value to clear the interrupt, this hits the register
1048*5113495bSYour Name * first
1049*5113495bSYour Name */
1050*5113495bSYour Name reg_buffer[0] = cpu_int_status;
1051*5113495bSYour Name /* the remaining 4 values are set to zero which have no-effect */
1052*5113495bSYour Name reg_buffer[1] = 0;
1053*5113495bSYour Name reg_buffer[2] = 0;
1054*5113495bSYour Name reg_buffer[3] = 0;
1055*5113495bSYour Name
1056*5113495bSYour Name status = hif_read_write(pdev->HIFDevice,
1057*5113495bSYour Name CPU_INT_STATUS_ADDRESS,
1058*5113495bSYour Name reg_buffer, 4, HIF_WR_SYNC_BYTE_FIX, NULL);
1059*5113495bSYour Name
1060*5113495bSYour Name A_ASSERT(status == QDF_STATUS_SUCCESS);
1061*5113495bSYour Name
1062*5113495bSYour Name /* The Interrupt sent to the Host is generated via bit0
1063*5113495bSYour Name * of CPU INT register
1064*5113495bSYour Name */
1065*5113495bSYour Name if (cpu_int_status & 0x1) {
1066*5113495bSYour Name if (pdev->hif_callbacks.fwEventHandler)
1067*5113495bSYour Name /* It calls into HTC which propagates this
1068*5113495bSYour Name * to ol_target_failure()
1069*5113495bSYour Name */
1070*5113495bSYour Name pdev->hif_callbacks.fwEventHandler(
1071*5113495bSYour Name pdev->hif_callbacks.Context,
1072*5113495bSYour Name QDF_STATUS_E_FAILURE);
1073*5113495bSYour Name } else {
1074*5113495bSYour Name hif_err("Unrecognized CPU event");
1075*5113495bSYour Name }
1076*5113495bSYour Name
1077*5113495bSYour Name return status;
1078*5113495bSYour Name }
1079*5113495bSYour Name
1080*5113495bSYour Name /**
1081*5113495bSYour Name * hif_dev_service_error_interrupt() - service error interrupts
1082*5113495bSYour Name * synchronously
1083*5113495bSYour Name *
1084*5113495bSYour Name * @pdev: hif sdio device context
1085*5113495bSYour Name *
1086*5113495bSYour Name * Return: QDF_STATUS_SUCCESS for success
1087*5113495bSYour Name */
hif_dev_service_error_interrupt(struct hif_sdio_device * pdev)1088*5113495bSYour Name static QDF_STATUS hif_dev_service_error_interrupt(struct hif_sdio_device *pdev)
1089*5113495bSYour Name {
1090*5113495bSYour Name QDF_STATUS status;
1091*5113495bSYour Name uint8_t reg_buffer[4];
1092*5113495bSYour Name uint8_t error_int_status = 0;
1093*5113495bSYour Name
1094*5113495bSYour Name error_int_status = mboxProcRegs(pdev).error_int_status & 0x0F;
1095*5113495bSYour Name hif_err("Err intr status: 0x%x", error_int_status);
1096*5113495bSYour Name
1097*5113495bSYour Name if (ERROR_INT_STATUS_WAKEUP_GET(error_int_status))
1098*5113495bSYour Name hif_err("Error : Wakeup");
1099*5113495bSYour Name
1100*5113495bSYour Name if (ERROR_INT_STATUS_RX_UNDERFLOW_GET(error_int_status))
1101*5113495bSYour Name hif_err("Error : Rx Underflow");
1102*5113495bSYour Name
1103*5113495bSYour Name if (ERROR_INT_STATUS_TX_OVERFLOW_GET(error_int_status))
1104*5113495bSYour Name hif_err("Error : Tx Overflow");
1105*5113495bSYour Name
1106*5113495bSYour Name /* Clear the interrupt */
1107*5113495bSYour Name mboxProcRegs(pdev).error_int_status &= ~error_int_status;
1108*5113495bSYour Name
1109*5113495bSYour Name /* set up the register transfer buffer to hit the register
1110*5113495bSYour Name * 4 times , this is done to make the access 4-byte
1111*5113495bSYour Name * aligned to mitigate issues with host bus interconnects that
1112*5113495bSYour Name * restrict bus transfer lengths to be a multiple of 4-bytes
1113*5113495bSYour Name */
1114*5113495bSYour Name
1115*5113495bSYour Name /* set W1C value to clear the interrupt */
1116*5113495bSYour Name reg_buffer[0] = error_int_status;
1117*5113495bSYour Name /* the remaining 4 values are set to zero which have no-effect */
1118*5113495bSYour Name reg_buffer[1] = 0;
1119*5113495bSYour Name reg_buffer[2] = 0;
1120*5113495bSYour Name reg_buffer[3] = 0;
1121*5113495bSYour Name
1122*5113495bSYour Name status = hif_read_write(pdev->HIFDevice,
1123*5113495bSYour Name ERROR_INT_STATUS_ADDRESS,
1124*5113495bSYour Name reg_buffer, 4, HIF_WR_SYNC_BYTE_FIX, NULL);
1125*5113495bSYour Name
1126*5113495bSYour Name A_ASSERT(status == QDF_STATUS_SUCCESS);
1127*5113495bSYour Name return status;
1128*5113495bSYour Name }
1129*5113495bSYour Name
1130*5113495bSYour Name /**
1131*5113495bSYour Name * hif_dev_service_debug_interrupt() - service debug interrupts
1132*5113495bSYour Name * synchronously
1133*5113495bSYour Name *
1134*5113495bSYour Name * @pdev: hif sdio device context
1135*5113495bSYour Name *
1136*5113495bSYour Name * Return: QDF_STATUS_SUCCESS for success
1137*5113495bSYour Name */
hif_dev_service_debug_interrupt(struct hif_sdio_device * pdev)1138*5113495bSYour Name static QDF_STATUS hif_dev_service_debug_interrupt(struct hif_sdio_device *pdev)
1139*5113495bSYour Name {
1140*5113495bSYour Name uint32_t dummy;
1141*5113495bSYour Name QDF_STATUS status;
1142*5113495bSYour Name
1143*5113495bSYour Name /* Send a target failure event to the application */
1144*5113495bSYour Name hif_err("Target debug interrupt");
1145*5113495bSYour Name
1146*5113495bSYour Name /* clear the interrupt , the debug error interrupt is counter 0
1147*5113495bSYour Name * read counter to clear interrupt
1148*5113495bSYour Name */
1149*5113495bSYour Name status = hif_read_write(pdev->HIFDevice,
1150*5113495bSYour Name COUNT_DEC_ADDRESS,
1151*5113495bSYour Name (uint8_t *)&dummy,
1152*5113495bSYour Name 4, HIF_RD_SYNC_BYTE_INC, NULL);
1153*5113495bSYour Name
1154*5113495bSYour Name A_ASSERT(status == QDF_STATUS_SUCCESS);
1155*5113495bSYour Name return status;
1156*5113495bSYour Name }
1157*5113495bSYour Name
1158*5113495bSYour Name /**
1159*5113495bSYour Name * hif_dev_service_counter_interrupt() - service counter interrupts
1160*5113495bSYour Name * synchronously
1161*5113495bSYour Name * @pdev: hif sdio device context
1162*5113495bSYour Name *
1163*5113495bSYour Name * Return: QDF_STATUS_SUCCESS for success
1164*5113495bSYour Name */
1165*5113495bSYour Name static
hif_dev_service_counter_interrupt(struct hif_sdio_device * pdev)1166*5113495bSYour Name QDF_STATUS hif_dev_service_counter_interrupt(struct hif_sdio_device *pdev)
1167*5113495bSYour Name {
1168*5113495bSYour Name uint8_t counter_int_status;
1169*5113495bSYour Name
1170*5113495bSYour Name AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Counter Interrupt\n"));
1171*5113495bSYour Name
1172*5113495bSYour Name counter_int_status = mboxProcRegs(pdev).counter_int_status &
1173*5113495bSYour Name mboxEnaRegs(pdev).counter_int_status_enable;
1174*5113495bSYour Name
1175*5113495bSYour Name AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
1176*5113495bSYour Name ("Valid interrupt source in COUNTER_INT_STATUS: 0x%x\n",
1177*5113495bSYour Name counter_int_status));
1178*5113495bSYour Name
1179*5113495bSYour Name /* Check if the debug interrupt is pending
1180*5113495bSYour Name * NOTE: other modules like GMBOX may use the counter interrupt
1181*5113495bSYour Name * for credit flow control on other counters, we only need to
1182*5113495bSYour Name * check for the debug assertion counter interrupt
1183*5113495bSYour Name */
1184*5113495bSYour Name if (counter_int_status & AR6K_TARGET_DEBUG_INTR_MASK)
1185*5113495bSYour Name return hif_dev_service_debug_interrupt(pdev);
1186*5113495bSYour Name
1187*5113495bSYour Name return QDF_STATUS_SUCCESS;
1188*5113495bSYour Name }
1189*5113495bSYour Name
/* Fetch the cached RX lookahead word for mailbox @i from the processing
 * register shadow.  The parameter and the full expansion are parenthesized
 * so that an expression argument (e.g. idx + 1) indexes the intended word
 * (CERT PRE01-C / PRE02-C).
 */
#define RX_LOOAHEAD_GET(pdev, i) \
	(mboxProcRegs(pdev).rx_lookahead[MAILBOX_LOOKAHEAD_SIZE_IN_WORD * (i)])
1192*5113495bSYour Name /**
1193*5113495bSYour Name * hif_dev_process_pending_irqs() - process pending interrupts
1194*5113495bSYour Name * @pdev: hif sdio device context
1195*5113495bSYour Name * @done: pending irq completion status
1196*5113495bSYour Name * @async_processing: sync/async processing flag
1197*5113495bSYour Name *
1198*5113495bSYour Name * Return: QDF_STATUS_SUCCESS for success
1199*5113495bSYour Name */
hif_dev_process_pending_irqs(struct hif_sdio_device * pdev,bool * done,bool * async_processing)1200*5113495bSYour Name QDF_STATUS hif_dev_process_pending_irqs(struct hif_sdio_device *pdev,
1201*5113495bSYour Name bool *done,
1202*5113495bSYour Name bool *async_processing)
1203*5113495bSYour Name {
1204*5113495bSYour Name QDF_STATUS status = QDF_STATUS_SUCCESS;
1205*5113495bSYour Name uint8_t host_int_status = 0;
1206*5113495bSYour Name uint32_t l_ahead[MAILBOX_USED_COUNT];
1207*5113495bSYour Name int i;
1208*5113495bSYour Name
1209*5113495bSYour Name qdf_mem_zero(&l_ahead, sizeof(l_ahead));
1210*5113495bSYour Name AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
1211*5113495bSYour Name ("+ProcessPendingIRQs: (dev: 0x%lX)\n",
1212*5113495bSYour Name (unsigned long)pdev));
1213*5113495bSYour Name
1214*5113495bSYour Name /* NOTE: the HIF implementation guarantees that the context
1215*5113495bSYour Name * of this call allows us to perform SYNCHRONOUS I/O,
1216*5113495bSYour Name * that is we can block, sleep or call any API that
1217*5113495bSYour Name * can block or switch thread/task ontexts.
1218*5113495bSYour Name * This is a fully schedulable context.
1219*5113495bSYour Name */
1220*5113495bSYour Name do {
1221*5113495bSYour Name if (mboxEnaRegs(pdev).int_status_enable == 0) {
1222*5113495bSYour Name /* interrupt enables have been cleared, do not try
1223*5113495bSYour Name * to process any pending interrupts that
1224*5113495bSYour Name * may result in more bus transactions.
1225*5113495bSYour Name * The target may be unresponsive at this point.
1226*5113495bSYour Name */
1227*5113495bSYour Name break;
1228*5113495bSYour Name }
1229*5113495bSYour Name status = hif_read_write(pdev->HIFDevice,
1230*5113495bSYour Name HOST_INT_STATUS_ADDRESS,
1231*5113495bSYour Name (uint8_t *)&mboxProcRegs(pdev),
1232*5113495bSYour Name sizeof(mboxProcRegs(pdev)),
1233*5113495bSYour Name HIF_RD_SYNC_BYTE_INC, NULL);
1234*5113495bSYour Name
1235*5113495bSYour Name if (QDF_IS_STATUS_ERROR(status))
1236*5113495bSYour Name break;
1237*5113495bSYour Name
1238*5113495bSYour Name if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_IRQ)) {
1239*5113495bSYour Name hif_dev_dump_registers(pdev,
1240*5113495bSYour Name &mboxProcRegs(pdev),
1241*5113495bSYour Name &mboxEnaRegs(pdev),
1242*5113495bSYour Name &mboxCountRegs(pdev));
1243*5113495bSYour Name }
1244*5113495bSYour Name
1245*5113495bSYour Name /* Update only those registers that are enabled */
1246*5113495bSYour Name host_int_status = mboxProcRegs(pdev).host_int_status
1247*5113495bSYour Name & mboxEnaRegs(pdev).int_status_enable;
1248*5113495bSYour Name
1249*5113495bSYour Name /* only look at mailbox status if the HIF layer did not
1250*5113495bSYour Name * provide this function, on some HIF interfaces reading
1251*5113495bSYour Name * the RX lookahead is not valid to do
1252*5113495bSYour Name */
1253*5113495bSYour Name for (i = 0; i < MAILBOX_USED_COUNT; i++) {
1254*5113495bSYour Name l_ahead[i] = 0;
1255*5113495bSYour Name if (host_int_status & (1 << i)) {
1256*5113495bSYour Name /* mask out pending mailbox value, we use
1257*5113495bSYour Name * "lookAhead" as the real flag for
1258*5113495bSYour Name * mailbox processing below
1259*5113495bSYour Name */
1260*5113495bSYour Name host_int_status &= ~(1 << i);
1261*5113495bSYour Name if (mboxProcRegs(pdev).
1262*5113495bSYour Name rx_lookahead_valid & (1 << i)) {
1263*5113495bSYour Name /* mailbox has a message and the
1264*5113495bSYour Name * look ahead is valid
1265*5113495bSYour Name */
1266*5113495bSYour Name l_ahead[i] = RX_LOOAHEAD_GET(pdev, i);
1267*5113495bSYour Name }
1268*5113495bSYour Name }
1269*5113495bSYour Name } /*end of for loop */
1270*5113495bSYour Name } while (false);
1271*5113495bSYour Name
1272*5113495bSYour Name do {
1273*5113495bSYour Name bool bLookAheadValid = false;
1274*5113495bSYour Name /* did the interrupt status fetches succeed? */
1275*5113495bSYour Name if (QDF_IS_STATUS_ERROR(status))
1276*5113495bSYour Name break;
1277*5113495bSYour Name
1278*5113495bSYour Name for (i = 0; i < MAILBOX_USED_COUNT; i++) {
1279*5113495bSYour Name if (l_ahead[i] != 0) {
1280*5113495bSYour Name bLookAheadValid = true;
1281*5113495bSYour Name break;
1282*5113495bSYour Name }
1283*5113495bSYour Name }
1284*5113495bSYour Name
1285*5113495bSYour Name if ((host_int_status == 0) && !bLookAheadValid) {
1286*5113495bSYour Name /* nothing to process, the caller can use this
1287*5113495bSYour Name * to break out of a loop
1288*5113495bSYour Name */
1289*5113495bSYour Name *done = true;
1290*5113495bSYour Name break;
1291*5113495bSYour Name }
1292*5113495bSYour Name
1293*5113495bSYour Name if (bLookAheadValid) {
1294*5113495bSYour Name for (i = 0; i < MAILBOX_USED_COUNT; i++) {
1295*5113495bSYour Name int fetched = 0;
1296*5113495bSYour Name
1297*5113495bSYour Name if (l_ahead[i] == 0)
1298*5113495bSYour Name continue;
1299*5113495bSYour Name AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
1300*5113495bSYour Name ("mbox[%d],lookahead:0x%X\n",
1301*5113495bSYour Name i, l_ahead[i]));
1302*5113495bSYour Name /* Mailbox Interrupt, the HTC layer may issue
1303*5113495bSYour Name * async requests to empty the mailbox...
1304*5113495bSYour Name * When emptying the recv mailbox we use the
1305*5113495bSYour Name * async handler from the completion routine of
1306*5113495bSYour Name * routine of the callers read request.
1307*5113495bSYour Name * This can improve performance by reducing
1308*5113495bSYour Name * the context switching when we rapidly
1309*5113495bSYour Name * pull packets
1310*5113495bSYour Name */
1311*5113495bSYour Name status = hif_dev_recv_message_pending_handler(
1312*5113495bSYour Name pdev, i,
1313*5113495bSYour Name &l_ahead
1314*5113495bSYour Name [i], 1,
1315*5113495bSYour Name async_processing,
1316*5113495bSYour Name &fetched);
1317*5113495bSYour Name if (QDF_IS_STATUS_ERROR(status))
1318*5113495bSYour Name break;
1319*5113495bSYour Name
1320*5113495bSYour Name if (!fetched) {
1321*5113495bSYour Name /* HTC could not pull any messages out
1322*5113495bSYour Name * due to lack of resources force DSR
1323*5113495bSYour Name * handle to ack the interrupt
1324*5113495bSYour Name */
1325*5113495bSYour Name *async_processing = false;
1326*5113495bSYour Name pdev->RecheckIRQStatusCnt = 0;
1327*5113495bSYour Name }
1328*5113495bSYour Name }
1329*5113495bSYour Name }
1330*5113495bSYour Name
1331*5113495bSYour Name /* now handle the rest of them */
1332*5113495bSYour Name AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
1333*5113495bSYour Name ("Valid source for OTHER interrupts: 0x%x\n",
1334*5113495bSYour Name host_int_status));
1335*5113495bSYour Name
1336*5113495bSYour Name if (HOST_INT_STATUS_CPU_GET(host_int_status)) {
1337*5113495bSYour Name /* CPU Interrupt */
1338*5113495bSYour Name status = hif_dev_service_cpu_interrupt(pdev);
1339*5113495bSYour Name if (QDF_IS_STATUS_ERROR(status))
1340*5113495bSYour Name break;
1341*5113495bSYour Name }
1342*5113495bSYour Name
1343*5113495bSYour Name if (HOST_INT_STATUS_ERROR_GET(host_int_status)) {
1344*5113495bSYour Name /* Error Interrupt */
1345*5113495bSYour Name status = hif_dev_service_error_interrupt(pdev);
1346*5113495bSYour Name if (QDF_IS_STATUS_ERROR(status))
1347*5113495bSYour Name break;
1348*5113495bSYour Name }
1349*5113495bSYour Name
1350*5113495bSYour Name if (HOST_INT_STATUS_COUNTER_GET(host_int_status)) {
1351*5113495bSYour Name /* Counter Interrupt */
1352*5113495bSYour Name status = hif_dev_service_counter_interrupt(pdev);
1353*5113495bSYour Name if (QDF_IS_STATUS_ERROR(status))
1354*5113495bSYour Name break;
1355*5113495bSYour Name }
1356*5113495bSYour Name
1357*5113495bSYour Name } while (false);
1358*5113495bSYour Name
1359*5113495bSYour Name /* an optimization to bypass reading the IRQ status registers
1360*5113495bSYour Name * unnecessarily which can re-wake the target, if upper layers
1361*5113495bSYour Name * determine that we are in a low-throughput mode, we can
1362*5113495bSYour Name * rely on taking another interrupt rather than re-checking
1363*5113495bSYour Name * the status registers which can re-wake the target.
1364*5113495bSYour Name *
1365*5113495bSYour Name * NOTE : for host interfaces that use the special
1366*5113495bSYour Name * GetPendingEventsFunc, this optimization cannot be used due to
1367*5113495bSYour Name * possible side-effects. For example, SPI requires the host
1368*5113495bSYour Name * to drain all messages from the mailbox before exiting
1369*5113495bSYour Name * the ISR routine.
1370*5113495bSYour Name */
1371*5113495bSYour Name if (!(*async_processing) && (pdev->RecheckIRQStatusCnt == 0)) {
1372*5113495bSYour Name AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
1373*5113495bSYour Name ("Bypass IRQ Status re-check, forcing done\n"));
1374*5113495bSYour Name *done = true;
1375*5113495bSYour Name }
1376*5113495bSYour Name
1377*5113495bSYour Name AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
1378*5113495bSYour Name ("-ProcessPendingIRQs: (done:%d, async:%d) status=%d\n",
1379*5113495bSYour Name *done, *async_processing, status));
1380*5113495bSYour Name
1381*5113495bSYour Name return status;
1382*5113495bSYour Name }
1383*5113495bSYour Name
1384*5113495bSYour Name #define DEV_CHECK_RECV_YIELD(pdev) \
1385*5113495bSYour Name ((pdev)->CurrentDSRRecvCount >= \
1386*5113495bSYour Name (pdev)->HifIRQYieldParams.recv_packet_yield_count)
1387*5113495bSYour Name /**
1388*5113495bSYour Name * hif_dev_dsr_handler() - Synchronous interrupt handler
1389*5113495bSYour Name *
1390*5113495bSYour Name * @context: hif send context
1391*5113495bSYour Name *
1392*5113495bSYour Name * Return: 0 for success and non-zero for failure
1393*5113495bSYour Name */
hif_dev_dsr_handler(void * context)1394*5113495bSYour Name QDF_STATUS hif_dev_dsr_handler(void *context)
1395*5113495bSYour Name {
1396*5113495bSYour Name struct hif_sdio_device *pdev = (struct hif_sdio_device *)context;
1397*5113495bSYour Name QDF_STATUS status = QDF_STATUS_SUCCESS;
1398*5113495bSYour Name bool done = false;
1399*5113495bSYour Name bool async_proc = false;
1400*5113495bSYour Name
1401*5113495bSYour Name /* reset the recv counter that tracks when we need
1402*5113495bSYour Name * to yield from the DSR
1403*5113495bSYour Name */
1404*5113495bSYour Name pdev->CurrentDSRRecvCount = 0;
1405*5113495bSYour Name /* reset counter used to flag a re-scan of IRQ
1406*5113495bSYour Name * status registers on the target
1407*5113495bSYour Name */
1408*5113495bSYour Name pdev->RecheckIRQStatusCnt = 0;
1409*5113495bSYour Name
1410*5113495bSYour Name while (!done) {
1411*5113495bSYour Name status = hif_dev_process_pending_irqs(pdev, &done, &async_proc);
1412*5113495bSYour Name if (QDF_IS_STATUS_ERROR(status))
1413*5113495bSYour Name break;
1414*5113495bSYour Name
1415*5113495bSYour Name if (pdev->HifIRQProcessingMode == HIF_DEVICE_IRQ_SYNC_ONLY) {
1416*5113495bSYour Name /* the HIF layer does not allow async IRQ processing,
1417*5113495bSYour Name * override the asyncProc flag
1418*5113495bSYour Name */
1419*5113495bSYour Name async_proc = false;
1420*5113495bSYour Name /* this will cause us to re-enter ProcessPendingIRQ()
1421*5113495bSYour Name * and re-read interrupt status registers.
1422*5113495bSYour Name * This has a nice side effect of blocking us until all
1423*5113495bSYour Name * async read requests are completed. This behavior is
1424*5113495bSYour Name * required as we do not allow ASYNC processing
1425*5113495bSYour Name * in interrupt handlers (like Windows CE)
1426*5113495bSYour Name */
1427*5113495bSYour Name
1428*5113495bSYour Name if (pdev->DSRCanYield && DEV_CHECK_RECV_YIELD(pdev))
1429*5113495bSYour Name /* ProcessPendingIRQs() pulled enough recv
1430*5113495bSYour Name * messages to satisfy the yield count, stop
1431*5113495bSYour Name * checking for more messages and return
1432*5113495bSYour Name */
1433*5113495bSYour Name break;
1434*5113495bSYour Name }
1435*5113495bSYour Name
1436*5113495bSYour Name if (async_proc) {
1437*5113495bSYour Name /* the function does some async I/O for performance,
1438*5113495bSYour Name * we need to exit the ISR immediately, the check below
1439*5113495bSYour Name * will prevent the interrupt from being
1440*5113495bSYour Name * Ack'd while we handle it asynchronously
1441*5113495bSYour Name */
1442*5113495bSYour Name break;
1443*5113495bSYour Name }
1444*5113495bSYour Name }
1445*5113495bSYour Name
1446*5113495bSYour Name if (QDF_IS_STATUS_SUCCESS(status) && !async_proc) {
1447*5113495bSYour Name /* Ack the interrupt only if :
1448*5113495bSYour Name * 1. we did not get any errors in processing interrupts
1449*5113495bSYour Name * 2. there are no outstanding async processing requests
1450*5113495bSYour Name */
1451*5113495bSYour Name if (pdev->DSRCanYield) {
1452*5113495bSYour Name /* if the DSR can yield do not ACK the interrupt, there
1453*5113495bSYour Name * could be more pending messages. The HIF layer
1454*5113495bSYour Name * must ACK the interrupt on behalf of HTC
1455*5113495bSYour Name */
1456*5113495bSYour Name hif_info("Yield (RX count: %d)",
1457*5113495bSYour Name pdev->CurrentDSRRecvCount);
1458*5113495bSYour Name } else {
1459*5113495bSYour Name hif_ack_interrupt(pdev->HIFDevice);
1460*5113495bSYour Name }
1461*5113495bSYour Name }
1462*5113495bSYour Name
1463*5113495bSYour Name return status;
1464*5113495bSYour Name }
1465*5113495bSYour Name
1466*5113495bSYour Name /**
1467*5113495bSYour Name * hif_read_write() - queue a read/write request
1468*5113495bSYour Name * @device: pointer to hif device structure
1469*5113495bSYour Name * @address: address to read
1470*5113495bSYour Name * @buffer: buffer to hold read/write data
1471*5113495bSYour Name * @length: length to read/write
1472*5113495bSYour Name * @request: read/write/sync/async request
1473*5113495bSYour Name * @context: pointer to hold calling context
1474*5113495bSYour Name *
1475*5113495bSYour Name * Return: 0 on success, error number otherwise.
1476*5113495bSYour Name */
1477*5113495bSYour Name QDF_STATUS
hif_read_write(struct hif_sdio_dev * device,unsigned long address,char * buffer,uint32_t length,uint32_t request,void * context)1478*5113495bSYour Name hif_read_write(struct hif_sdio_dev *device,
1479*5113495bSYour Name unsigned long address,
1480*5113495bSYour Name char *buffer, uint32_t length,
1481*5113495bSYour Name uint32_t request, void *context)
1482*5113495bSYour Name {
1483*5113495bSYour Name QDF_STATUS status = QDF_STATUS_SUCCESS;
1484*5113495bSYour Name struct bus_request *busrequest;
1485*5113495bSYour Name
1486*5113495bSYour Name AR_DEBUG_ASSERT(device);
1487*5113495bSYour Name AR_DEBUG_ASSERT(device->func);
1488*5113495bSYour Name hif_debug("device 0x%pK addr 0x%lX buffer 0x%pK",
1489*5113495bSYour Name device, address, buffer);
1490*5113495bSYour Name hif_debug("len %d req 0x%X context 0x%pK",
1491*5113495bSYour Name length, request, context);
1492*5113495bSYour Name
1493*5113495bSYour Name /*sdio r/w action is not needed when suspend, so just return */
1494*5113495bSYour Name if ((device->is_suspend) &&
1495*5113495bSYour Name (device->power_config == HIF_DEVICE_POWER_CUT)) {
1496*5113495bSYour Name AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("skip io when suspending\n"));
1497*5113495bSYour Name return QDF_STATUS_SUCCESS;
1498*5113495bSYour Name }
1499*5113495bSYour Name do {
1500*5113495bSYour Name if ((request & HIF_ASYNCHRONOUS) ||
1501*5113495bSYour Name (request & HIF_SYNCHRONOUS)) {
1502*5113495bSYour Name /* serialize all requests through the async thread */
1503*5113495bSYour Name AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
1504*5113495bSYour Name ("%s: Execution mode: %s\n", __func__,
1505*5113495bSYour Name (request & HIF_ASYNCHRONOUS) ? "Async"
1506*5113495bSYour Name : "Synch"));
1507*5113495bSYour Name busrequest = hif_allocate_bus_request(device);
1508*5113495bSYour Name if (!busrequest) {
1509*5113495bSYour Name hif_err("bus requests unavail");
1510*5113495bSYour Name hif_err("%s, addr:0x%lX, len:%d",
1511*5113495bSYour Name request & HIF_SDIO_READ ? "READ" :
1512*5113495bSYour Name "WRITE", address, length);
1513*5113495bSYour Name return QDF_STATUS_E_FAILURE;
1514*5113495bSYour Name }
1515*5113495bSYour Name busrequest->address = address;
1516*5113495bSYour Name busrequest->buffer = buffer;
1517*5113495bSYour Name busrequest->length = length;
1518*5113495bSYour Name busrequest->request = request;
1519*5113495bSYour Name busrequest->context = context;
1520*5113495bSYour Name
1521*5113495bSYour Name add_to_async_list(device, busrequest);
1522*5113495bSYour Name
1523*5113495bSYour Name if (request & HIF_SYNCHRONOUS) {
1524*5113495bSYour Name AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
1525*5113495bSYour Name ("%s: queued sync req: 0x%lX\n",
1526*5113495bSYour Name __func__,
1527*5113495bSYour Name (unsigned long)busrequest));
1528*5113495bSYour Name
1529*5113495bSYour Name /* wait for completion */
1530*5113495bSYour Name up(&device->sem_async);
1531*5113495bSYour Name if (down_interruptible(&busrequest->sem_req) ==
1532*5113495bSYour Name 0) {
1533*5113495bSYour Name QDF_STATUS status = busrequest->status;
1534*5113495bSYour Name
1535*5113495bSYour Name hif_debug("sync freeing 0x%lX:0x%X",
1536*5113495bSYour Name (unsigned long)busrequest,
1537*5113495bSYour Name busrequest->status);
1538*5113495bSYour Name hif_debug("freeing req: 0x%X",
1539*5113495bSYour Name (unsigned int)request);
1540*5113495bSYour Name hif_free_bus_request(device,
1541*5113495bSYour Name busrequest);
1542*5113495bSYour Name return status;
1543*5113495bSYour Name } else {
1544*5113495bSYour Name /* interrupted, exit */
1545*5113495bSYour Name return QDF_STATUS_E_FAILURE;
1546*5113495bSYour Name }
1547*5113495bSYour Name } else {
1548*5113495bSYour Name hif_debug("queued async req: 0x%lX",
1549*5113495bSYour Name (unsigned long)busrequest);
1550*5113495bSYour Name up(&device->sem_async);
1551*5113495bSYour Name return QDF_STATUS_E_PENDING;
1552*5113495bSYour Name }
1553*5113495bSYour Name } else {
1554*5113495bSYour Name hif_err("Invalid execution mode: 0x%08x",
1555*5113495bSYour Name (unsigned int)request);
1556*5113495bSYour Name status = QDF_STATUS_E_INVAL;
1557*5113495bSYour Name break;
1558*5113495bSYour Name }
1559*5113495bSYour Name } while (0);
1560*5113495bSYour Name
1561*5113495bSYour Name return status;
1562*5113495bSYour Name }
1563*5113495bSYour Name
1564*5113495bSYour Name /**
1565*5113495bSYour Name * hif_sdio_func_enable() - Handle device enabling as per device
1566*5113495bSYour Name * @ol_sc: HIF device object
1567*5113495bSYour Name * @func: function pointer
1568*5113495bSYour Name *
1569*5113495bSYour Name * Return QDF_STATUS
1570*5113495bSYour Name */
hif_sdio_func_enable(struct hif_softc * ol_sc,struct sdio_func * func)1571*5113495bSYour Name static QDF_STATUS hif_sdio_func_enable(struct hif_softc *ol_sc,
1572*5113495bSYour Name struct sdio_func *func)
1573*5113495bSYour Name {
1574*5113495bSYour Name struct hif_sdio_dev *device = get_hif_device(ol_sc, func);
1575*5113495bSYour Name
1576*5113495bSYour Name if (device->is_disabled) {
1577*5113495bSYour Name int ret = 0;
1578*5113495bSYour Name
1579*5113495bSYour Name sdio_claim_host(func);
1580*5113495bSYour Name
1581*5113495bSYour Name ret = hif_sdio_quirk_async_intr(ol_sc, func);
1582*5113495bSYour Name if (ret) {
1583*5113495bSYour Name hif_err("Error setting async intr:%d", ret);
1584*5113495bSYour Name sdio_release_host(func);
1585*5113495bSYour Name return QDF_STATUS_E_FAILURE;
1586*5113495bSYour Name }
1587*5113495bSYour Name
1588*5113495bSYour Name func->enable_timeout = 100;
1589*5113495bSYour Name ret = sdio_enable_func(func);
1590*5113495bSYour Name if (ret) {
1591*5113495bSYour Name hif_err("Unable to enable function: %d", ret);
1592*5113495bSYour Name sdio_release_host(func);
1593*5113495bSYour Name return QDF_STATUS_E_FAILURE;
1594*5113495bSYour Name }
1595*5113495bSYour Name
1596*5113495bSYour Name ret = sdio_set_block_size(func, HIF_BLOCK_SIZE);
1597*5113495bSYour Name if (ret) {
1598*5113495bSYour Name hif_err("Unable to set block size 0x%X : %d",
1599*5113495bSYour Name HIF_BLOCK_SIZE, ret);
1600*5113495bSYour Name sdio_release_host(func);
1601*5113495bSYour Name return QDF_STATUS_E_FAILURE;
1602*5113495bSYour Name }
1603*5113495bSYour Name
1604*5113495bSYour Name ret = hif_sdio_quirk_mod_strength(ol_sc, func);
1605*5113495bSYour Name if (ret) {
1606*5113495bSYour Name hif_err("Error setting mod strength : %d", ret);
1607*5113495bSYour Name sdio_release_host(func);
1608*5113495bSYour Name return QDF_STATUS_E_FAILURE;
1609*5113495bSYour Name }
1610*5113495bSYour Name
1611*5113495bSYour Name sdio_release_host(func);
1612*5113495bSYour Name }
1613*5113495bSYour Name
1614*5113495bSYour Name return QDF_STATUS_SUCCESS;
1615*5113495bSYour Name }
1616*5113495bSYour Name
1617*5113495bSYour Name /**
1618*5113495bSYour Name * __hif_read_write() - sdio read/write wrapper
1619*5113495bSYour Name * @device: pointer to hif device structure
1620*5113495bSYour Name * @address: address to read
1621*5113495bSYour Name * @buffer: buffer to hold read/write data
1622*5113495bSYour Name * @length: length to read/write
1623*5113495bSYour Name * @request: read/write/sync/async request
1624*5113495bSYour Name * @context: pointer to hold calling context
1625*5113495bSYour Name *
1626*5113495bSYour Name * Return: 0 on success, error number otherwise.
1627*5113495bSYour Name */
1628*5113495bSYour Name static QDF_STATUS
__hif_read_write(struct hif_sdio_dev * device,uint32_t address,char * buffer,uint32_t length,uint32_t request,void * context)1629*5113495bSYour Name __hif_read_write(struct hif_sdio_dev *device,
1630*5113495bSYour Name uint32_t address, char *buffer,
1631*5113495bSYour Name uint32_t length, uint32_t request, void *context)
1632*5113495bSYour Name {
1633*5113495bSYour Name uint8_t opcode;
1634*5113495bSYour Name QDF_STATUS status = QDF_STATUS_SUCCESS;
1635*5113495bSYour Name int ret = A_OK;
1636*5113495bSYour Name uint8_t *tbuffer;
1637*5113495bSYour Name bool bounced = false;
1638*5113495bSYour Name
1639*5113495bSYour Name if (!device) {
1640*5113495bSYour Name hif_err("Device null!");
1641*5113495bSYour Name return QDF_STATUS_E_INVAL;
1642*5113495bSYour Name }
1643*5113495bSYour Name
1644*5113495bSYour Name if (!device->func) {
1645*5113495bSYour Name hif_err("func null!");
1646*5113495bSYour Name return QDF_STATUS_E_INVAL;
1647*5113495bSYour Name }
1648*5113495bSYour Name
1649*5113495bSYour Name hif_debug("addr:0X%06X, len:%08d, %s, %s",
1650*5113495bSYour Name address, length,
1651*5113495bSYour Name request & HIF_SDIO_READ ? "Read " : "Write",
1652*5113495bSYour Name request & HIF_ASYNCHRONOUS ? "Async" : "Sync ");
1653*5113495bSYour Name
1654*5113495bSYour Name do {
1655*5113495bSYour Name if (request & HIF_EXTENDED_IO) {
1656*5113495bSYour Name //HIF_INFO_HI("%s: Command type: CMD53\n", __func__);
1657*5113495bSYour Name } else {
1658*5113495bSYour Name hif_err("Invalid command type: 0x%08x\n", request);
1659*5113495bSYour Name status = QDF_STATUS_E_INVAL;
1660*5113495bSYour Name break;
1661*5113495bSYour Name }
1662*5113495bSYour Name
1663*5113495bSYour Name if (request & HIF_BLOCK_BASIS) {
1664*5113495bSYour Name /* round to whole block length size */
1665*5113495bSYour Name length =
1666*5113495bSYour Name (length / HIF_BLOCK_SIZE) *
1667*5113495bSYour Name HIF_BLOCK_SIZE;
1668*5113495bSYour Name hif_debug("Block mode (BlockLen: %d)", length);
1669*5113495bSYour Name } else if (request & HIF_BYTE_BASIS) {
1670*5113495bSYour Name hif_debug("Byte mode (BlockLen: %d)", length);
1671*5113495bSYour Name } else {
1672*5113495bSYour Name hif_err("Invalid data mode: 0x%08x", request);
1673*5113495bSYour Name status = QDF_STATUS_E_INVAL;
1674*5113495bSYour Name break;
1675*5113495bSYour Name }
1676*5113495bSYour Name if (request & HIF_SDIO_WRITE) {
1677*5113495bSYour Name hif_fixup_write_param(device, request,
1678*5113495bSYour Name &length, &address);
1679*5113495bSYour Name
1680*5113495bSYour Name hif_debug("addr:%08X, len:0x%08X, dummy:0x%04X",
1681*5113495bSYour Name address, length,
1682*5113495bSYour Name (request & HIF_DUMMY_SPACE_MASK) >> 16);
1683*5113495bSYour Name }
1684*5113495bSYour Name
1685*5113495bSYour Name if (request & HIF_FIXED_ADDRESS) {
1686*5113495bSYour Name opcode = CMD53_FIXED_ADDRESS;
1687*5113495bSYour Name hif_debug("Addr mode: fixed 0x%X", address);
1688*5113495bSYour Name } else if (request & HIF_INCREMENTAL_ADDRESS) {
1689*5113495bSYour Name opcode = CMD53_INCR_ADDRESS;
1690*5113495bSYour Name hif_debug("Address mode: Incremental 0x%X", address);
1691*5113495bSYour Name } else {
1692*5113495bSYour Name hif_err("Invalid address mode: 0x%08x", request);
1693*5113495bSYour Name status = QDF_STATUS_E_INVAL;
1694*5113495bSYour Name break;
1695*5113495bSYour Name }
1696*5113495bSYour Name
1697*5113495bSYour Name if (request & HIF_SDIO_WRITE) {
1698*5113495bSYour Name #if HIF_USE_DMA_BOUNCE_BUFFER
1699*5113495bSYour Name if (BUFFER_NEEDS_BOUNCE(buffer)) {
1700*5113495bSYour Name AR_DEBUG_ASSERT(device->dma_buffer);
1701*5113495bSYour Name tbuffer = device->dma_buffer;
1702*5113495bSYour Name /* copy the write data to the dma buffer */
1703*5113495bSYour Name AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE);
1704*5113495bSYour Name if (length > HIF_DMA_BUFFER_SIZE) {
1705*5113495bSYour Name hif_err("Invalid write len: %d",
1706*5113495bSYour Name length);
1707*5113495bSYour Name status = QDF_STATUS_E_INVAL;
1708*5113495bSYour Name break;
1709*5113495bSYour Name }
1710*5113495bSYour Name memcpy(tbuffer, buffer, length);
1711*5113495bSYour Name bounced = true;
1712*5113495bSYour Name } else {
1713*5113495bSYour Name tbuffer = buffer;
1714*5113495bSYour Name }
1715*5113495bSYour Name #else
1716*5113495bSYour Name tbuffer = buffer;
1717*5113495bSYour Name #endif
1718*5113495bSYour Name if (opcode == CMD53_FIXED_ADDRESS && tbuffer) {
1719*5113495bSYour Name ret = sdio_writesb(device->func, address,
1720*5113495bSYour Name tbuffer, length);
1721*5113495bSYour Name hif_debug("r=%d addr:0x%X, len:%d, 0x%X",
1722*5113495bSYour Name ret, address, length,
1723*5113495bSYour Name *(int *)tbuffer);
1724*5113495bSYour Name } else if (tbuffer) {
1725*5113495bSYour Name ret = sdio_memcpy_toio(device->func, address,
1726*5113495bSYour Name tbuffer, length);
1727*5113495bSYour Name hif_debug("r=%d addr:0x%X, len:%d, 0x%X",
1728*5113495bSYour Name ret, address, length,
1729*5113495bSYour Name *(int *)tbuffer);
1730*5113495bSYour Name }
1731*5113495bSYour Name } else if (request & HIF_SDIO_READ) {
1732*5113495bSYour Name #if HIF_USE_DMA_BOUNCE_BUFFER
1733*5113495bSYour Name if (BUFFER_NEEDS_BOUNCE(buffer)) {
1734*5113495bSYour Name AR_DEBUG_ASSERT(device->dma_buffer);
1735*5113495bSYour Name AR_DEBUG_ASSERT(length <= HIF_DMA_BUFFER_SIZE);
1736*5113495bSYour Name if (length > HIF_DMA_BUFFER_SIZE) {
1737*5113495bSYour Name hif_err("Invalid read len: %d", length);
1738*5113495bSYour Name status = QDF_STATUS_E_INVAL;
1739*5113495bSYour Name break;
1740*5113495bSYour Name }
1741*5113495bSYour Name tbuffer = device->dma_buffer;
1742*5113495bSYour Name bounced = true;
1743*5113495bSYour Name } else {
1744*5113495bSYour Name tbuffer = buffer;
1745*5113495bSYour Name }
1746*5113495bSYour Name #else
1747*5113495bSYour Name tbuffer = buffer;
1748*5113495bSYour Name #endif
1749*5113495bSYour Name if (opcode == CMD53_FIXED_ADDRESS && tbuffer) {
1750*5113495bSYour Name ret = sdio_readsb(device->func, tbuffer,
1751*5113495bSYour Name address, length);
1752*5113495bSYour Name hif_debug("r=%d addr:0x%X, len:%d, 0x%X",
1753*5113495bSYour Name ret, address, length,
1754*5113495bSYour Name *(int *)tbuffer);
1755*5113495bSYour Name } else if (tbuffer) {
1756*5113495bSYour Name ret = sdio_memcpy_fromio(device->func,
1757*5113495bSYour Name tbuffer, address,
1758*5113495bSYour Name length);
1759*5113495bSYour Name hif_debug("r=%d addr:0x%X, len:%d, 0x%X",
1760*5113495bSYour Name ret, address, length,
1761*5113495bSYour Name *(int *)tbuffer);
1762*5113495bSYour Name }
1763*5113495bSYour Name #if HIF_USE_DMA_BOUNCE_BUFFER
1764*5113495bSYour Name if (bounced && tbuffer)
1765*5113495bSYour Name memcpy(buffer, tbuffer, length);
1766*5113495bSYour Name #endif
1767*5113495bSYour Name } else {
1768*5113495bSYour Name hif_err("Invalid dir: 0x%08x", request);
1769*5113495bSYour Name status = QDF_STATUS_E_INVAL;
1770*5113495bSYour Name return status;
1771*5113495bSYour Name }
1772*5113495bSYour Name
1773*5113495bSYour Name if (ret) {
1774*5113495bSYour Name hif_err("SDIO bus operation failed!");
1775*5113495bSYour Name hif_err("MMC stack returned : %d", ret);
1776*5113495bSYour Name hif_err("addr:0X%06X, len:%08d, %s, %s",
1777*5113495bSYour Name address, length,
1778*5113495bSYour Name request & HIF_SDIO_READ ? "Read " : "Write",
1779*5113495bSYour Name request & HIF_ASYNCHRONOUS ?
1780*5113495bSYour Name "Async" : "Sync");
1781*5113495bSYour Name status = QDF_STATUS_E_FAILURE;
1782*5113495bSYour Name }
1783*5113495bSYour Name } while (false);
1784*5113495bSYour Name
1785*5113495bSYour Name return status;
1786*5113495bSYour Name }
1787*5113495bSYour Name
1788*5113495bSYour Name /**
1789*5113495bSYour Name * async_task() - thread function to serialize all bus requests
1790*5113495bSYour Name * @param: pointer to hif device
1791*5113495bSYour Name *
1792*5113495bSYour Name * thread function to serialize all requests, both sync and async
1793*5113495bSYour Name * Return: 0 on success, error number otherwise.
1794*5113495bSYour Name */
async_task(void * param)1795*5113495bSYour Name static int async_task(void *param)
1796*5113495bSYour Name {
1797*5113495bSYour Name struct hif_sdio_dev *device;
1798*5113495bSYour Name struct bus_request *request;
1799*5113495bSYour Name QDF_STATUS status;
1800*5113495bSYour Name bool claimed = false;
1801*5113495bSYour Name
1802*5113495bSYour Name device = (struct hif_sdio_dev *)param;
1803*5113495bSYour Name set_current_state(TASK_INTERRUPTIBLE);
1804*5113495bSYour Name while (!device->async_shutdown) {
1805*5113495bSYour Name /* wait for work */
1806*5113495bSYour Name if (down_interruptible(&device->sem_async) != 0) {
1807*5113495bSYour Name /* interrupted, exit */
1808*5113495bSYour Name AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
1809*5113495bSYour Name ("%s: async task interrupted\n",
1810*5113495bSYour Name __func__));
1811*5113495bSYour Name break;
1812*5113495bSYour Name }
1813*5113495bSYour Name if (device->async_shutdown) {
1814*5113495bSYour Name AR_DEBUG_PRINTF(ATH_DEBUG_TRACE,
1815*5113495bSYour Name ("%s: async task stopping\n",
1816*5113495bSYour Name __func__));
1817*5113495bSYour Name break;
1818*5113495bSYour Name }
1819*5113495bSYour Name /* we want to hold the host over multiple cmds
1820*5113495bSYour Name * if possible, but holding the host blocks
1821*5113495bSYour Name * card interrupts
1822*5113495bSYour Name */
1823*5113495bSYour Name qdf_spin_lock_irqsave(&device->asynclock);
1824*5113495bSYour Name /* pull the request to work on */
1825*5113495bSYour Name while (device->asyncreq) {
1826*5113495bSYour Name request = device->asyncreq;
1827*5113495bSYour Name if (request->inusenext)
1828*5113495bSYour Name device->asyncreq = request->inusenext;
1829*5113495bSYour Name else
1830*5113495bSYour Name device->asyncreq = NULL;
1831*5113495bSYour Name qdf_spin_unlock_irqrestore(&device->asynclock);
1832*5113495bSYour Name hif_debug("processing req: 0x%lX",
1833*5113495bSYour Name (unsigned long)request);
1834*5113495bSYour Name
1835*5113495bSYour Name if (!claimed) {
1836*5113495bSYour Name sdio_claim_host(device->func);
1837*5113495bSYour Name claimed = true;
1838*5113495bSYour Name }
1839*5113495bSYour Name if (request->scatter_req) {
1840*5113495bSYour Name A_ASSERT(device->scatter_enabled);
1841*5113495bSYour Name /* pass the request to scatter routine which
1842*5113495bSYour Name * executes it synchronously, note, no need
1843*5113495bSYour Name * to free the request since scatter requests
1844*5113495bSYour Name * are maintained on a separate list
1845*5113495bSYour Name */
1846*5113495bSYour Name status = do_hif_read_write_scatter(device,
1847*5113495bSYour Name request);
1848*5113495bSYour Name } else {
1849*5113495bSYour Name /* call hif_read_write in sync mode */
1850*5113495bSYour Name status =
1851*5113495bSYour Name __hif_read_write(device,
1852*5113495bSYour Name request->address,
1853*5113495bSYour Name request->buffer,
1854*5113495bSYour Name request->length,
1855*5113495bSYour Name request->
1856*5113495bSYour Name request &
1857*5113495bSYour Name ~HIF_SYNCHRONOUS,
1858*5113495bSYour Name NULL);
1859*5113495bSYour Name if (request->request & HIF_ASYNCHRONOUS) {
1860*5113495bSYour Name void *context = request->context;
1861*5113495bSYour Name
1862*5113495bSYour Name hif_free_bus_request(device, request);
1863*5113495bSYour Name device->htc_callbacks.
1864*5113495bSYour Name rw_compl_handler(context, status);
1865*5113495bSYour Name } else {
1866*5113495bSYour Name hif_debug("upping req: 0x%lX",
1867*5113495bSYour Name (unsigned long)request);
1868*5113495bSYour Name request->status = status;
1869*5113495bSYour Name up(&request->sem_req);
1870*5113495bSYour Name }
1871*5113495bSYour Name }
1872*5113495bSYour Name qdf_spin_lock_irqsave(&device->asynclock);
1873*5113495bSYour Name }
1874*5113495bSYour Name qdf_spin_unlock_irqrestore(&device->asynclock);
1875*5113495bSYour Name if (claimed) {
1876*5113495bSYour Name sdio_release_host(device->func);
1877*5113495bSYour Name claimed = false;
1878*5113495bSYour Name }
1879*5113495bSYour Name }
1880*5113495bSYour Name
1881*5113495bSYour Name kthread_complete_and_exit(&device->async_completion, 0);
1882*5113495bSYour Name
1883*5113495bSYour Name return 0;
1884*5113495bSYour Name }
1885*5113495bSYour Name
1886*5113495bSYour Name /**
1887*5113495bSYour Name * hif_disable_func() - Disable SDIO function
1888*5113495bSYour Name *
1889*5113495bSYour Name * @device: HIF device pointer
1890*5113495bSYour Name * @func: SDIO function pointer
1891*5113495bSYour Name * @reset: If this is called from resume or probe
1892*5113495bSYour Name *
1893*5113495bSYour Name * Return: 0 in case of success, else error value
1894*5113495bSYour Name */
hif_disable_func(struct hif_sdio_dev * device,struct sdio_func * func,bool reset)1895*5113495bSYour Name QDF_STATUS hif_disable_func(struct hif_sdio_dev *device,
1896*5113495bSYour Name struct sdio_func *func,
1897*5113495bSYour Name bool reset)
1898*5113495bSYour Name {
1899*5113495bSYour Name QDF_STATUS status = QDF_STATUS_SUCCESS;
1900*5113495bSYour Name
1901*5113495bSYour Name HIF_ENTER();
1902*5113495bSYour Name if (!IS_ERR(device->async_task)) {
1903*5113495bSYour Name init_completion(&device->async_completion);
1904*5113495bSYour Name device->async_shutdown = 1;
1905*5113495bSYour Name up(&device->sem_async);
1906*5113495bSYour Name wait_for_completion(&device->async_completion);
1907*5113495bSYour Name device->async_task = NULL;
1908*5113495bSYour Name sema_init(&device->sem_async, 0);
1909*5113495bSYour Name }
1910*5113495bSYour Name
1911*5113495bSYour Name status = hif_sdio_func_disable(device, func, reset);
1912*5113495bSYour Name if (status == QDF_STATUS_SUCCESS)
1913*5113495bSYour Name device->is_disabled = true;
1914*5113495bSYour Name
1915*5113495bSYour Name cleanup_hif_scatter_resources(device);
1916*5113495bSYour Name
1917*5113495bSYour Name HIF_EXIT();
1918*5113495bSYour Name
1919*5113495bSYour Name return status;
1920*5113495bSYour Name }
1921*5113495bSYour Name
1922*5113495bSYour Name /**
1923*5113495bSYour Name * hif_enable_func() - Enable SDIO function
1924*5113495bSYour Name *
1925*5113495bSYour Name * @ol_sc: HIF object pointer
1926*5113495bSYour Name * @device: HIF device pointer
1927*5113495bSYour Name * @func: SDIO function pointer
1928*5113495bSYour Name * @resume: If this is called from resume or probe
1929*5113495bSYour Name *
1930*5113495bSYour Name * Return: 0 in case of success, else error value
1931*5113495bSYour Name */
hif_enable_func(struct hif_softc * ol_sc,struct hif_sdio_dev * device,struct sdio_func * func,bool resume)1932*5113495bSYour Name QDF_STATUS hif_enable_func(struct hif_softc *ol_sc, struct hif_sdio_dev *device,
1933*5113495bSYour Name struct sdio_func *func, bool resume)
1934*5113495bSYour Name {
1935*5113495bSYour Name QDF_STATUS ret = QDF_STATUS_SUCCESS;
1936*5113495bSYour Name
1937*5113495bSYour Name HIF_ENTER();
1938*5113495bSYour Name
1939*5113495bSYour Name if (!device) {
1940*5113495bSYour Name hif_err("HIF device is NULL");
1941*5113495bSYour Name return QDF_STATUS_E_INVAL;
1942*5113495bSYour Name }
1943*5113495bSYour Name
1944*5113495bSYour Name if (hif_sdio_func_enable(ol_sc, func))
1945*5113495bSYour Name return QDF_STATUS_E_FAILURE;
1946*5113495bSYour Name
1947*5113495bSYour Name /* create async I/O thread */
1948*5113495bSYour Name if (!device->async_task && device->is_disabled) {
1949*5113495bSYour Name device->async_shutdown = 0;
1950*5113495bSYour Name device->async_task = kthread_create(async_task,
1951*5113495bSYour Name (void *)device,
1952*5113495bSYour Name "AR6K Async");
1953*5113495bSYour Name if (IS_ERR(device->async_task)) {
1954*5113495bSYour Name hif_err("Error creating async task");
1955*5113495bSYour Name return QDF_STATUS_E_FAILURE;
1956*5113495bSYour Name }
1957*5113495bSYour Name device->is_disabled = false;
1958*5113495bSYour Name wake_up_process(device->async_task);
1959*5113495bSYour Name }
1960*5113495bSYour Name
1961*5113495bSYour Name if (!resume)
1962*5113495bSYour Name ret = hif_sdio_probe(ol_sc, func, device);
1963*5113495bSYour Name
1964*5113495bSYour Name HIF_EXIT();
1965*5113495bSYour Name
1966*5113495bSYour Name return ret;
1967*5113495bSYour Name }
1968*5113495bSYour Name #endif /* CONFIG_SDIO_TRANSFER_MAILBOX */
1969