xref: /wlan-driver/qcacld-3.0/core/dp/htt/htt_tx.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1*5113495bSYour Name /*
2*5113495bSYour Name  * Copyright (c) 2011, 2014-2021 The Linux Foundation. All rights reserved.
3*5113495bSYour Name  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4*5113495bSYour Name  *
5*5113495bSYour Name  * Permission to use, copy, modify, and/or distribute this software for
6*5113495bSYour Name  * any purpose with or without fee is hereby granted, provided that the
7*5113495bSYour Name  * above copyright notice and this permission notice appear in all
8*5113495bSYour Name  * copies.
9*5113495bSYour Name  *
10*5113495bSYour Name  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11*5113495bSYour Name  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12*5113495bSYour Name  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13*5113495bSYour Name  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14*5113495bSYour Name  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15*5113495bSYour Name  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16*5113495bSYour Name  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17*5113495bSYour Name  * PERFORMANCE OF THIS SOFTWARE.
18*5113495bSYour Name  */
19*5113495bSYour Name 
20*5113495bSYour Name /**
21*5113495bSYour Name  * @file htt_tx.c
22*5113495bSYour Name  * @brief Implement transmit aspects of HTT.
23*5113495bSYour Name  * @details
24*5113495bSYour Name  *  This file contains three categories of HTT tx code:
25*5113495bSYour Name  *  1.  An abstraction of the tx descriptor, to hide the
26*5113495bSYour Name  *      differences between the HL vs. LL tx descriptor.
27*5113495bSYour Name  *  2.  Functions for allocating and freeing HTT tx descriptors.
28*5113495bSYour Name  *  3.  The function that accepts a tx frame from txrx and sends the
29*5113495bSYour Name  *      tx frame to HTC.
30*5113495bSYour Name  */
31*5113495bSYour Name #include <osdep.h>              /* uint32_t, offsetof, etc. */
32*5113495bSYour Name #include <qdf_types.h>          /* qdf_dma_addr_t */
33*5113495bSYour Name #include <qdf_mem.h>         /* qdf_mem_alloc_consistent et al */
34*5113495bSYour Name #include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
35*5113495bSYour Name #include <qdf_time.h>           /* qdf_mdelay */
36*5113495bSYour Name 
37*5113495bSYour Name #include <htt.h>                /* htt_tx_msdu_desc_t */
38*5113495bSYour Name #include <htc.h>                /* HTC_HDR_LENGTH */
39*5113495bSYour Name #include <htc_api.h>            /* htc_flush_surprise_remove */
40*5113495bSYour Name #include <ol_cfg.h>             /* ol_cfg_netbuf_frags_max, etc. */
41*5113495bSYour Name #include <ol_htt_tx_api.h>      /* HTT_TX_DESC_VADDR_OFFSET */
42*5113495bSYour Name #include <ol_txrx_htt_api.h>    /* ol_tx_msdu_id_storage */
43*5113495bSYour Name #include <ol_txrx_internal.h>
44*5113495bSYour Name #include <htt_internal.h>
45*5113495bSYour Name 
46*5113495bSYour Name #include <cds_utils.h>
47*5113495bSYour Name #include <ce_api.h>
48*5113495bSYour Name #include <ce_internal.h>
49*5113495bSYour Name 
50*5113495bSYour Name /* IPA Micro controller TX data packet HTT Header Preset
51*5113495bSYour Name  * 31 | 30  29 | 28 | 27 | 26  22  | 21   16 | 15  13   | 12  8      | 7 0
52*5113495bSYour Name  ***----------------------------------------------------------------------------
53*5113495bSYour Name  * R  | CS  OL | R  | PP | ext TID | vdev ID | pkt type | pkt subtyp | msg type
54*5113495bSYour Name  * 0  | 0      | 0  |    | 0x1F    | 0       | 2        | 0          | 0x01
55*5113495bSYour Name  ***----------------------------------------------------------------------------
56*5113495bSYour Name  * pkt ID                                    | pkt length
57*5113495bSYour Name  ***----------------------------------------------------------------------------
58*5113495bSYour Name  *                                frag_desc_ptr
59*5113495bSYour Name  ***----------------------------------------------------------------------------
60*5113495bSYour Name  *                                   peer_id
61*5113495bSYour Name  ***----------------------------------------------------------------------------
62*5113495bSYour Name  */
63*5113495bSYour Name #define HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT 0x07C04001
64*5113495bSYour Name 
65*5113495bSYour Name #ifdef QCA_WIFI_3_0
66*5113495bSYour Name #define IPA_UC_TX_BUF_FRAG_DESC_OFFSET 20
67*5113495bSYour Name #define IPA_UC_TX_BUF_FRAG_HDR_OFFSET  64
68*5113495bSYour Name #define IPA_UC_TX_BUF_TSO_HDR_SIZE     6
69*5113495bSYour Name #define IPA_UC_TX_BUF_PADDR_HI_MASK    0x0000001F
70*5113495bSYour Name #else
71*5113495bSYour Name #define IPA_UC_TX_BUF_FRAG_DESC_OFFSET 16
72*5113495bSYour Name #define IPA_UC_TX_BUF_FRAG_HDR_OFFSET  32
73*5113495bSYour Name #endif /* QCA_WIFI_3_0 */
74*5113495bSYour Name 
#if HTT_PADDR64
/*
 * HTT_TX_DESC_FRAG_FIELD_UPDATE - write a fragment-descriptor bus address
 * into the tx descriptor's frags_desc_ptr field.
 *
 * 64-bit paddr variant: store the low 32 bits, then the upper bits masked
 * to 5 bits (matches IPA_UC_TX_BUF_PADDR_HI_MASK above).
 * Note: "frag_filed" is a historical typo for "frag_field"; kept as-is.
 */
#define HTT_TX_DESC_FRAG_FIELD_UPDATE(frag_filed_ptr, frag_desc_addr)          \
do {                                                                           \
	*frag_filed_ptr = qdf_get_lower_32_bits(frag_desc_addr);               \
	frag_filed_ptr++;                                                      \
	/* frags_desc_ptr.hi */                                                \
	*frag_filed_ptr = qdf_get_upper_32_bits(frag_desc_addr) & 0x1F;        \
} while (0)
#else
/*
 * 32-bit paddr variant: only the low 32 bits of the address are stored.
 */
#define HTT_TX_DESC_FRAG_FIELD_UPDATE(frag_filed_ptr, frag_desc_addr)          \
do {                                                                           \
	*frag_filed_ptr = qdf_get_lower_32_bits(frag_desc_addr);               \
} while (0)
#endif
89*5113495bSYour Name 
90*5113495bSYour Name /*--- setup / tear-down functions -------------------------------------------*/
91*5113495bSYour Name 
92*5113495bSYour Name static qdf_dma_addr_t htt_tx_get_paddr(htt_pdev_handle pdev,
93*5113495bSYour Name 				char *target_vaddr);
94*5113495bSYour Name 
95*5113495bSYour Name #ifdef HELIUMPLUS
96*5113495bSYour Name /**
97*5113495bSYour Name  * htt_tx_desc_get_size() - get tx descripotrs size
98*5113495bSYour Name  * @pdev:	htt device instance pointer
99*5113495bSYour Name  *
100*5113495bSYour Name  * This function will get HTT TX descriptor size and fragment descriptor size
101*5113495bSYour Name  *
102*5113495bSYour Name  * Return: None
103*5113495bSYour Name  */
htt_tx_desc_get_size(struct htt_pdev_t * pdev)104*5113495bSYour Name static void htt_tx_desc_get_size(struct htt_pdev_t *pdev)
105*5113495bSYour Name {
106*5113495bSYour Name 	pdev->tx_descs.size = sizeof(struct htt_host_tx_desc_t);
107*5113495bSYour Name 	if (HTT_WIFI_IP_VERSION(pdev->wifi_ip_ver.major, 0x2)) {
108*5113495bSYour Name 		/*
109*5113495bSYour Name 		 * sizeof MSDU_EXT/Fragmentation descriptor.
110*5113495bSYour Name 		 */
111*5113495bSYour Name 		pdev->frag_descs.size = sizeof(struct msdu_ext_desc_t);
112*5113495bSYour Name 	} else {
113*5113495bSYour Name 		/*
114*5113495bSYour Name 		 * Add the fragmentation descriptor elements.
115*5113495bSYour Name 		 * Add the most that the OS may deliver, plus one more
116*5113495bSYour Name 		 * in case the txrx code adds a prefix fragment (for
117*5113495bSYour Name 		 * TSO or audio interworking SNAP header)
118*5113495bSYour Name 		 */
119*5113495bSYour Name 		pdev->frag_descs.size =
120*5113495bSYour Name 			(ol_cfg_netbuf_frags_max(pdev->ctrl_pdev)+1) * 8
121*5113495bSYour Name 			+ 4;
122*5113495bSYour Name 	}
123*5113495bSYour Name }
124*5113495bSYour Name 
125*5113495bSYour Name /**
126*5113495bSYour Name  * htt_tx_frag_desc_field_update() - Update fragment descriptor field
127*5113495bSYour Name  * @pdev:	htt device instance pointer
128*5113495bSYour Name  * @fptr:	Fragment descriptor field pointer
129*5113495bSYour Name  * @index:	Descriptor index to find page and offset
130*5113495bSYour Name  * @desc_v_ptr:	descriptor virtual pointot to find offset
131*5113495bSYour Name  *
132*5113495bSYour Name  * This function will update fragment descriptor field with actual fragment
133*5113495bSYour Name  * descriptor stating physical pointer
134*5113495bSYour Name  *
135*5113495bSYour Name  * Return: None
136*5113495bSYour Name  */
htt_tx_frag_desc_field_update(struct htt_pdev_t * pdev,uint32_t * fptr,unsigned int index,struct htt_tx_msdu_desc_t * desc_v_ptr)137*5113495bSYour Name static void htt_tx_frag_desc_field_update(struct htt_pdev_t *pdev,
138*5113495bSYour Name 		uint32_t *fptr, unsigned int index,
139*5113495bSYour Name 		struct htt_tx_msdu_desc_t *desc_v_ptr)
140*5113495bSYour Name {
141*5113495bSYour Name 	unsigned int target_page;
142*5113495bSYour Name 	unsigned int offset;
143*5113495bSYour Name 	struct qdf_mem_dma_page_t *dma_page;
144*5113495bSYour Name 	qdf_dma_addr_t frag_desc_addr;
145*5113495bSYour Name 
146*5113495bSYour Name 	target_page = index / pdev->frag_descs.desc_pages.num_element_per_page;
147*5113495bSYour Name 	offset = index % pdev->frag_descs.desc_pages.num_element_per_page;
148*5113495bSYour Name 	dma_page = &pdev->frag_descs.desc_pages.dma_pages[target_page];
149*5113495bSYour Name 	frag_desc_addr = (dma_page->page_p_addr +
150*5113495bSYour Name 		offset * pdev->frag_descs.size);
151*5113495bSYour Name 	HTT_TX_DESC_FRAG_FIELD_UPDATE(fptr, frag_desc_addr);
152*5113495bSYour Name }
153*5113495bSYour Name 
154*5113495bSYour Name /**
155*5113495bSYour Name  * htt_tx_frag_desc_attach() - Attach fragment descriptor
156*5113495bSYour Name  * @pdev:		htt device instance pointer
157*5113495bSYour Name  * @desc_pool_elems:	Number of fragment descriptor
158*5113495bSYour Name  *
159*5113495bSYour Name  * This function will allocate fragment descriptor
160*5113495bSYour Name  *
161*5113495bSYour Name  * Return: 0 success
162*5113495bSYour Name  */
htt_tx_frag_desc_attach(struct htt_pdev_t * pdev,uint16_t desc_pool_elems)163*5113495bSYour Name static int htt_tx_frag_desc_attach(struct htt_pdev_t *pdev,
164*5113495bSYour Name 	uint16_t desc_pool_elems)
165*5113495bSYour Name {
166*5113495bSYour Name 	pdev->frag_descs.pool_elems = desc_pool_elems;
167*5113495bSYour Name 	qdf_mem_multi_pages_alloc(pdev->osdev, &pdev->frag_descs.desc_pages,
168*5113495bSYour Name 		pdev->frag_descs.size, desc_pool_elems,
169*5113495bSYour Name 		qdf_get_dma_mem_context((&pdev->frag_descs), memctx), false);
170*5113495bSYour Name 	if ((0 == pdev->frag_descs.desc_pages.num_pages) ||
171*5113495bSYour Name 		(!pdev->frag_descs.desc_pages.dma_pages)) {
172*5113495bSYour Name 		ol_txrx_err("FRAG descriptor alloc fail");
173*5113495bSYour Name 		return -ENOBUFS;
174*5113495bSYour Name 	}
175*5113495bSYour Name 	return 0;
176*5113495bSYour Name }
177*5113495bSYour Name 
/**
 * htt_tx_frag_desc_detach() - Detach fragment descriptor
 * @pdev:		htt device instance pointer
 *
 * Frees the multi-page fragment descriptor pool allocated by
 * htt_tx_frag_desc_attach().
 *
 * Return: None
 */
static void htt_tx_frag_desc_detach(struct htt_pdev_t *pdev)
{
	qdf_mem_multi_pages_free(pdev->osdev, &pdev->frag_descs.desc_pages,
		qdf_get_dma_mem_context((&pdev->frag_descs), memctx), false);
}
191*5113495bSYour Name 
192*5113495bSYour Name /**
193*5113495bSYour Name  * htt_tx_frag_alloc() - Allocate single fragment descriptor from the pool
194*5113495bSYour Name  * @pdev:		htt device instance pointer
195*5113495bSYour Name  * @index:		Descriptor index
196*5113495bSYour Name  * @frag_paddr:	        Fragment descriptor physical address
197*5113495bSYour Name  * @frag_ptr:		Fragment descriptor virtual address
198*5113495bSYour Name  *
199*5113495bSYour Name  * This function will free fragment descriptor
200*5113495bSYour Name  *
201*5113495bSYour Name  * Return: None
202*5113495bSYour Name  */
htt_tx_frag_alloc(htt_pdev_handle pdev,u_int16_t index,qdf_dma_addr_t * frag_paddr,void ** frag_ptr)203*5113495bSYour Name int htt_tx_frag_alloc(htt_pdev_handle pdev,
204*5113495bSYour Name 	u_int16_t index, qdf_dma_addr_t *frag_paddr, void **frag_ptr)
205*5113495bSYour Name {
206*5113495bSYour Name 	uint16_t frag_page_index;
207*5113495bSYour Name 	uint16_t frag_elem_index;
208*5113495bSYour Name 	struct qdf_mem_dma_page_t *dma_page;
209*5113495bSYour Name 
210*5113495bSYour Name 	/*
211*5113495bSYour Name 	 * Index should never be 0, since its used by the hardware
212*5113495bSYour Name 	 * to terminate the link.
213*5113495bSYour Name 	 */
214*5113495bSYour Name 	if (index >= pdev->tx_descs.pool_elems) {
215*5113495bSYour Name 		*frag_ptr = NULL;
216*5113495bSYour Name 		return 1;
217*5113495bSYour Name 	}
218*5113495bSYour Name 
219*5113495bSYour Name 	frag_page_index = index /
220*5113495bSYour Name 		pdev->frag_descs.desc_pages.num_element_per_page;
221*5113495bSYour Name 	frag_elem_index = index %
222*5113495bSYour Name 		pdev->frag_descs.desc_pages.num_element_per_page;
223*5113495bSYour Name 	dma_page = &pdev->frag_descs.desc_pages.dma_pages[frag_page_index];
224*5113495bSYour Name 
225*5113495bSYour Name 	*frag_ptr = dma_page->page_v_addr_start +
226*5113495bSYour Name 		frag_elem_index * pdev->frag_descs.size;
227*5113495bSYour Name 	if (((char *)(*frag_ptr) < dma_page->page_v_addr_start) ||
228*5113495bSYour Name 		((char *)(*frag_ptr) > dma_page->page_v_addr_end)) {
229*5113495bSYour Name 		*frag_ptr = NULL;
230*5113495bSYour Name 		return 1;
231*5113495bSYour Name 	}
232*5113495bSYour Name 
233*5113495bSYour Name 	*frag_paddr = dma_page->page_p_addr +
234*5113495bSYour Name 		frag_elem_index * pdev->frag_descs.size;
235*5113495bSYour Name 	return 0;
236*5113495bSYour Name }
237*5113495bSYour Name #else
238*5113495bSYour Name 
239*5113495bSYour Name /**
240*5113495bSYour Name  * htt_tx_desc_get_size() - get tx descripotrs size
241*5113495bSYour Name  * @pdev:	htt device instance pointer
242*5113495bSYour Name  *
243*5113495bSYour Name  * This function will get HTT TX descriptor size and fragment descriptor size
244*5113495bSYour Name  *
245*5113495bSYour Name  * Return: None
246*5113495bSYour Name  */
htt_tx_desc_get_size(struct htt_pdev_t * pdev)247*5113495bSYour Name static inline void htt_tx_desc_get_size(struct htt_pdev_t *pdev)
248*5113495bSYour Name {
249*5113495bSYour Name 	if (pdev->cfg.is_high_latency) {
250*5113495bSYour Name 		pdev->tx_descs.size = sizeof(struct htt_host_tx_desc_t);
251*5113495bSYour Name 	} else {
252*5113495bSYour Name 		/*
253*5113495bSYour Name 		 * Start with the size of the base struct
254*5113495bSYour Name 		 * that actually gets downloaded.
255*5113495bSYour Name 		 *
256*5113495bSYour Name 		 * Add the fragmentation descriptor elements.
257*5113495bSYour Name 		 * Add the most that the OS may deliver, plus one more
258*5113495bSYour Name 		 * in case the txrx code adds a prefix fragment (for
259*5113495bSYour Name 		 * TSO or audio interworking SNAP header)
260*5113495bSYour Name 		 */
261*5113495bSYour Name 		pdev->tx_descs.size =
262*5113495bSYour Name 		sizeof(struct htt_host_tx_desc_t)
263*5113495bSYour Name 		+ (ol_cfg_netbuf_frags_max(pdev->ctrl_pdev) + 1) * 8
264*5113495bSYour Name 		/* 2x uint32_t */
265*5113495bSYour Name 		+ 4; /* uint32_t fragmentation list terminator */
266*5113495bSYour Name 	}
267*5113495bSYour Name }
268*5113495bSYour Name 
269*5113495bSYour Name #ifndef CONFIG_HL_SUPPORT
270*5113495bSYour Name 
271*5113495bSYour Name /**
272*5113495bSYour Name  * htt_tx_frag_desc_field_update() - Update fragment descriptor field
273*5113495bSYour Name  * @pdev:	htt device instance pointer
274*5113495bSYour Name  * @fptr:	Fragment descriptor field pointer
275*5113495bSYour Name  * @index:	Descriptor index to find page and offset
276*5113495bSYour Name  * @desc_v_ptr:	descriptor virtual pointot to find offset
277*5113495bSYour Name  *
278*5113495bSYour Name  * This function will update fragment descriptor field with actual fragment
279*5113495bSYour Name  * descriptor stating physical pointer
280*5113495bSYour Name  *
281*5113495bSYour Name  * Return: None
282*5113495bSYour Name  */
htt_tx_frag_desc_field_update(struct htt_pdev_t * pdev,uint32_t * fptr,unsigned int index,struct htt_tx_msdu_desc_t * desc_v_ptr)283*5113495bSYour Name static void htt_tx_frag_desc_field_update(struct htt_pdev_t *pdev,
284*5113495bSYour Name 		uint32_t *fptr, unsigned int index,
285*5113495bSYour Name 		struct htt_tx_msdu_desc_t *desc_v_ptr)
286*5113495bSYour Name {
287*5113495bSYour Name 	*fptr = (uint32_t)htt_tx_get_paddr(pdev, (char *)desc_v_ptr) +
288*5113495bSYour Name 		HTT_TX_DESC_LEN;
289*5113495bSYour Name }
290*5113495bSYour Name #endif
291*5113495bSYour Name 
/**
 * htt_tx_frag_desc_attach() - Attach fragment descriptor
 * @pdev:	htt device instance pointer
 * @desc_pool_elems:	Number of fragment descriptors
 *
 * No-op on non-HELIUMPLUS targets: the fragment list lives inside the
 * tx descriptor itself, so no separate pool is allocated.
 *
 * Return: 0 (always succeeds)
 */
static inline int htt_tx_frag_desc_attach(struct htt_pdev_t *pdev,
	int desc_pool_elems)
{
	return 0;
}
306*5113495bSYour Name 
/**
 * htt_tx_frag_desc_detach() - Detach fragment descriptor
 * @pdev:		htt device instance pointer
 *
 * No-op on non-HELIUMPLUS targets: no separate fragment descriptor pool
 * is ever allocated, so there is nothing to free.
 *
 * Return: None
 */
static void htt_tx_frag_desc_detach(struct htt_pdev_t *pdev) {}
316*5113495bSYour Name #endif /* HELIUMPLUS */
317*5113495bSYour Name 
318*5113495bSYour Name #ifdef CONFIG_HL_SUPPORT
319*5113495bSYour Name 
320*5113495bSYour Name /**
321*5113495bSYour Name  * htt_tx_attach() - Attach HTT device instance
322*5113495bSYour Name  * @pdev:		htt device instance pointer
323*5113495bSYour Name  * @desc_pool_elems:	Number of TX descriptors
324*5113495bSYour Name  *
325*5113495bSYour Name  * This function will allocate HTT TX resources
326*5113495bSYour Name  *
327*5113495bSYour Name  * Return: 0 Success
328*5113495bSYour Name  */
htt_tx_attach(struct htt_pdev_t * pdev,int desc_pool_elems)329*5113495bSYour Name int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
330*5113495bSYour Name {
331*5113495bSYour Name 	int i, i_int, pool_size;
332*5113495bSYour Name 	uint32_t **p;
333*5113495bSYour Name 	uint32_t num_link = 0;
334*5113495bSYour Name 	uint16_t num_page, num_desc_per_page;
335*5113495bSYour Name 	void **cacheable_pages = NULL;
336*5113495bSYour Name 
337*5113495bSYour Name 	htt_tx_desc_get_size(pdev);
338*5113495bSYour Name 
339*5113495bSYour Name 	/*
340*5113495bSYour Name 	 * Make sure tx_descs.size is a multiple of 4-bytes.
341*5113495bSYour Name 	 * It should be, but round up just to be sure.
342*5113495bSYour Name 	 */
343*5113495bSYour Name 	pdev->tx_descs.size = (pdev->tx_descs.size + 3) & (~0x3);
344*5113495bSYour Name 
345*5113495bSYour Name 	pdev->tx_descs.pool_elems = desc_pool_elems;
346*5113495bSYour Name 	pdev->tx_descs.alloc_cnt = 0;
347*5113495bSYour Name 	pool_size = pdev->tx_descs.pool_elems * pdev->tx_descs.size;
348*5113495bSYour Name 	qdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_descs.desc_pages,
349*5113495bSYour Name 				  pdev->tx_descs.size,
350*5113495bSYour Name 				  pdev->tx_descs.pool_elems,
351*5113495bSYour Name 				  qdf_get_dma_mem_context((&pdev->tx_descs),
352*5113495bSYour Name 							  memctx), true);
353*5113495bSYour Name 	if ((0 == pdev->tx_descs.desc_pages.num_pages) ||
354*5113495bSYour Name 	    (!pdev->tx_descs.desc_pages.cacheable_pages)) {
355*5113495bSYour Name 		ol_txrx_err("HTT desc alloc fail");
356*5113495bSYour Name 		goto out_fail;
357*5113495bSYour Name 	}
358*5113495bSYour Name 	num_page = pdev->tx_descs.desc_pages.num_pages;
359*5113495bSYour Name 	num_desc_per_page = pdev->tx_descs.desc_pages.num_element_per_page;
360*5113495bSYour Name 
361*5113495bSYour Name 	/* link tx descriptors into a freelist */
362*5113495bSYour Name 	cacheable_pages = pdev->tx_descs.desc_pages.cacheable_pages;
363*5113495bSYour Name 
364*5113495bSYour Name 	pdev->tx_descs.freelist = (uint32_t *)cacheable_pages[0];
365*5113495bSYour Name 	p = (uint32_t **)pdev->tx_descs.freelist;
366*5113495bSYour Name 	for (i = 0; i < num_page; i++) {
367*5113495bSYour Name 		for (i_int = 0; i_int < num_desc_per_page; i_int++) {
368*5113495bSYour Name 			if (i_int == (num_desc_per_page - 1)) {
369*5113495bSYour Name 				/*
370*5113495bSYour Name 				 * Last element on this page,
371*5113495bSYour Name 				 * should point next page
372*5113495bSYour Name 				 */
373*5113495bSYour Name 				if (!cacheable_pages[i + 1]) {
374*5113495bSYour Name 					ol_txrx_err("over flow num link %d",
375*5113495bSYour Name 						   num_link);
376*5113495bSYour Name 					goto free_htt_desc;
377*5113495bSYour Name 				}
378*5113495bSYour Name 				*p = (uint32_t *)cacheable_pages[i + 1];
379*5113495bSYour Name 			} else {
380*5113495bSYour Name 				*p = (uint32_t *)
381*5113495bSYour Name 					(((char *)p) + pdev->tx_descs.size);
382*5113495bSYour Name 			}
383*5113495bSYour Name 			num_link++;
384*5113495bSYour Name 			p = (uint32_t **) *p;
385*5113495bSYour Name 			/* Last link established exit */
386*5113495bSYour Name 			if (num_link == (pdev->tx_descs.pool_elems - 1))
387*5113495bSYour Name 				break;
388*5113495bSYour Name 		}
389*5113495bSYour Name 	}
390*5113495bSYour Name 	*p = NULL;
391*5113495bSYour Name 
392*5113495bSYour Name 	if (htt_tx_frag_desc_attach(pdev, desc_pool_elems)) {
393*5113495bSYour Name 		ol_txrx_err("HTT Frag descriptor alloc fail");
394*5113495bSYour Name 		goto free_htt_desc;
395*5113495bSYour Name 	}
396*5113495bSYour Name 
397*5113495bSYour Name 	/* success */
398*5113495bSYour Name 	return 0;
399*5113495bSYour Name 
400*5113495bSYour Name free_htt_desc:
401*5113495bSYour Name 	qdf_mem_multi_pages_free(pdev->osdev, &pdev->tx_descs.desc_pages,
402*5113495bSYour Name 				 qdf_get_dma_mem_context((&pdev->tx_descs),
403*5113495bSYour Name 							 memctx), true);
404*5113495bSYour Name out_fail:
405*5113495bSYour Name 	return -ENOBUFS;
406*5113495bSYour Name }
407*5113495bSYour Name 
/**
 * htt_tx_detach() - Free HTT TX resources (HL)
 * @pdev: htt device instance pointer
 *
 * Detaches the fragment descriptor pool (no-op on HL) and frees the
 * cacheable multi-page TX descriptor pool allocated by htt_tx_attach().
 *
 * Return: None
 */
void htt_tx_detach(struct htt_pdev_t *pdev)
{
	if (!pdev) {
		qdf_print("htt tx detach invalid instance");
		return;
	}

	htt_tx_frag_desc_detach(pdev);
	qdf_mem_multi_pages_free(pdev->osdev, &pdev->tx_descs.desc_pages,
				 qdf_get_dma_mem_context((&pdev->tx_descs),
							 memctx), true);
}
420*5113495bSYour Name 
/**
 * htt_tx_set_frag_desc_addr() - set up the fragmentation descriptor address
 * @pdev: pointer to the HTT instance making the allocation
 * @htt_tx_desc: Host tx descriptor that does not include HTC hdr
 * @index: index to alloc htt tx desc
 *
 * No-op on high-latency (HL) systems: no host-resident fragmentation
 * descriptor address needs to be programmed.
 *
 * Return: None
 */
static inline void
htt_tx_set_frag_desc_addr(struct htt_pdev_t *pdev,
			  struct htt_tx_msdu_desc_t *htt_tx_desc,
			  uint16_t index)
{
}
436*5113495bSYour Name 
/**
 * htt_tx_desc_frags_table_set() - set up the descriptor and payload
 *				   to corresponding fragments
 * @pdev: pointer to the HTT instance making the allocation
 * @desc: Host tx descriptor that does not include HTC hdr
 * @paddr: fragment physical address
 * @frag_desc_paddr: frag descriptor address
 * @reset: reset
 *
 * No-op on high-latency (HL) systems.
 *
 * Return: None
 */
void htt_tx_desc_frags_table_set(htt_pdev_handle pdev,
				 void *desc,
				 qdf_dma_addr_t paddr,
				 qdf_dma_addr_t frag_desc_paddr,
				 int reset)
{
	/* fragments table only applies to LL systems */
}
456*5113495bSYour Name 
457*5113495bSYour Name /**
458*5113495bSYour Name  * htt_tx_credit_update() - get the number of credits by which the amount of
459*5113495bSYour Name  *			    target credits needs to be updated
460*5113495bSYour Name  * @pdev: htt context
461*5113495bSYour Name  *
462*5113495bSYour Name  * Return: number of credits
463*5113495bSYour Name  */
htt_tx_credit_update(struct htt_pdev_t * pdev)464*5113495bSYour Name int htt_tx_credit_update(struct htt_pdev_t *pdev)
465*5113495bSYour Name {
466*5113495bSYour Name 	int credit_delta;
467*5113495bSYour Name 
468*5113495bSYour Name 	credit_delta = QDF_MIN(qdf_atomic_read(
469*5113495bSYour Name 			&pdev->htt_tx_credit.target_delta),
470*5113495bSYour Name 			qdf_atomic_read(&pdev->htt_tx_credit.bus_delta));
471*5113495bSYour Name 	if (credit_delta) {
472*5113495bSYour Name 		qdf_atomic_add(-credit_delta,
473*5113495bSYour Name 			       &pdev->htt_tx_credit.target_delta);
474*5113495bSYour Name 		qdf_atomic_add(-credit_delta,
475*5113495bSYour Name 			       &pdev->htt_tx_credit.bus_delta);
476*5113495bSYour Name 	}
477*5113495bSYour Name 	return credit_delta;
478*5113495bSYour Name }
479*5113495bSYour Name 
/**
 * htt_tx_get_paddr() - get physical address for htt desc
 * @pdev: htt device instance
 * @target_vaddr: descriptor virtual address
 *
 * Not required for HL systems; this stub always returns 0.
 *
 * Return: Physical address of descriptor (always 0 on HL)
 */
static inline
qdf_dma_addr_t htt_tx_get_paddr(htt_pdev_handle pdev,
				char *target_vaddr)
{
	return 0;
}
495*5113495bSYour Name 
496*5113495bSYour Name 
497*5113495bSYour Name #else
498*5113495bSYour Name 
htt_tx_attach(struct htt_pdev_t * pdev,int desc_pool_elems)499*5113495bSYour Name int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
500*5113495bSYour Name {
501*5113495bSYour Name 	int i, i_int, pool_size;
502*5113495bSYour Name 	uint32_t **p;
503*5113495bSYour Name 	struct qdf_mem_dma_page_t *page_info;
504*5113495bSYour Name 	uint32_t num_link = 0;
505*5113495bSYour Name 	uint16_t num_page, num_desc_per_page;
506*5113495bSYour Name 
507*5113495bSYour Name 	htt_tx_desc_get_size(pdev);
508*5113495bSYour Name 
509*5113495bSYour Name 	/*
510*5113495bSYour Name 	 * Make sure tx_descs.size is a multiple of 4-bytes.
511*5113495bSYour Name 	 * It should be, but round up just to be sure.
512*5113495bSYour Name 	 */
513*5113495bSYour Name 	pdev->tx_descs.size = (pdev->tx_descs.size + 3) & (~0x3);
514*5113495bSYour Name 
515*5113495bSYour Name 	pdev->tx_descs.pool_elems = desc_pool_elems;
516*5113495bSYour Name 	pdev->tx_descs.alloc_cnt = 0;
517*5113495bSYour Name 	pool_size = pdev->tx_descs.pool_elems * pdev->tx_descs.size;
518*5113495bSYour Name 	qdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_descs.desc_pages,
519*5113495bSYour Name 		pdev->tx_descs.size, pdev->tx_descs.pool_elems,
520*5113495bSYour Name 		qdf_get_dma_mem_context((&pdev->tx_descs), memctx), false);
521*5113495bSYour Name 	if ((0 == pdev->tx_descs.desc_pages.num_pages) ||
522*5113495bSYour Name 		(!pdev->tx_descs.desc_pages.dma_pages)) {
523*5113495bSYour Name 		ol_txrx_err("HTT desc alloc fail");
524*5113495bSYour Name 		goto out_fail;
525*5113495bSYour Name 	}
526*5113495bSYour Name 	num_page = pdev->tx_descs.desc_pages.num_pages;
527*5113495bSYour Name 	num_desc_per_page = pdev->tx_descs.desc_pages.num_element_per_page;
528*5113495bSYour Name 
529*5113495bSYour Name 	/* link tx descriptors into a freelist */
530*5113495bSYour Name 	page_info = pdev->tx_descs.desc_pages.dma_pages;
531*5113495bSYour Name 	pdev->tx_descs.freelist = (uint32_t *)page_info->page_v_addr_start;
532*5113495bSYour Name 	p = (uint32_t **) pdev->tx_descs.freelist;
533*5113495bSYour Name 	for (i = 0; i < num_page; i++) {
534*5113495bSYour Name 		for (i_int = 0; i_int < num_desc_per_page; i_int++) {
535*5113495bSYour Name 			if (i_int == (num_desc_per_page - 1)) {
536*5113495bSYour Name 				/*
537*5113495bSYour Name 				 * Last element on this page,
538*5113495bSYour Name 				 * should pint next page
539*5113495bSYour Name 				 */
540*5113495bSYour Name 				if (!page_info->page_v_addr_start) {
541*5113495bSYour Name 					ol_txrx_err("over flow num link %d",
542*5113495bSYour Name 						num_link);
543*5113495bSYour Name 					goto free_htt_desc;
544*5113495bSYour Name 				}
545*5113495bSYour Name 				page_info++;
546*5113495bSYour Name 				*p = (uint32_t *)page_info->page_v_addr_start;
547*5113495bSYour Name 			} else {
548*5113495bSYour Name 				*p = (uint32_t *)
549*5113495bSYour Name 					(((char *) p) + pdev->tx_descs.size);
550*5113495bSYour Name 			}
551*5113495bSYour Name 			num_link++;
552*5113495bSYour Name 			p = (uint32_t **) *p;
553*5113495bSYour Name 			/* Last link established exit */
554*5113495bSYour Name 			if (num_link == (pdev->tx_descs.pool_elems - 1))
555*5113495bSYour Name 				break;
556*5113495bSYour Name 		}
557*5113495bSYour Name 	}
558*5113495bSYour Name 	*p = NULL;
559*5113495bSYour Name 
560*5113495bSYour Name 	if (htt_tx_frag_desc_attach(pdev, desc_pool_elems)) {
561*5113495bSYour Name 		ol_txrx_err("HTT Frag descriptor alloc fail");
562*5113495bSYour Name 		goto free_htt_desc;
563*5113495bSYour Name 	}
564*5113495bSYour Name 
565*5113495bSYour Name 	/* success */
566*5113495bSYour Name 	return 0;
567*5113495bSYour Name 
568*5113495bSYour Name free_htt_desc:
569*5113495bSYour Name 	qdf_mem_multi_pages_free(pdev->osdev, &pdev->tx_descs.desc_pages,
570*5113495bSYour Name 		qdf_get_dma_mem_context((&pdev->tx_descs), memctx), false);
571*5113495bSYour Name out_fail:
572*5113495bSYour Name 	return -ENOBUFS;
573*5113495bSYour Name }
574*5113495bSYour Name 
/**
 * htt_tx_detach() - Free HTT TX resources (LL)
 * @pdev: htt device instance pointer
 *
 * Detaches the fragment descriptor pool and frees the DMA-coherent
 * multi-page TX descriptor pool allocated by htt_tx_attach().
 *
 * Return: None
 */
void htt_tx_detach(struct htt_pdev_t *pdev)
{
	if (!pdev) {
		qdf_print("htt tx detach invalid instance");
		return;
	}

	htt_tx_frag_desc_detach(pdev);
	qdf_mem_multi_pages_free(pdev->osdev, &pdev->tx_descs.desc_pages,
		qdf_get_dma_mem_context((&pdev->tx_descs), memctx), false);
}
586*5113495bSYour Name 
587*5113495bSYour Name static void
htt_tx_set_frag_desc_addr(struct htt_pdev_t * pdev,struct htt_tx_msdu_desc_t * htt_tx_desc,uint16_t index)588*5113495bSYour Name htt_tx_set_frag_desc_addr(struct htt_pdev_t *pdev,
589*5113495bSYour Name 			  struct htt_tx_msdu_desc_t *htt_tx_desc,
590*5113495bSYour Name 			  uint16_t index)
591*5113495bSYour Name {
592*5113495bSYour Name 	uint32_t *fragmentation_descr_field_ptr;
593*5113495bSYour Name 
594*5113495bSYour Name 	fragmentation_descr_field_ptr = (uint32_t *)
595*5113495bSYour Name 		((uint32_t *)htt_tx_desc) +
596*5113495bSYour Name 		HTT_TX_DESC_FRAGS_DESC_PADDR_OFFSET_DWORD;
597*5113495bSYour Name 	/*
598*5113495bSYour Name 	 * The fragmentation descriptor is allocated from consistent
599*5113495bSYour Name 	 * memory. Therefore, we can use the address directly rather
600*5113495bSYour Name 	 * than having to map it from a virtual/CPU address to a
601*5113495bSYour Name 	 * physical/bus address.
602*5113495bSYour Name 	 */
603*5113495bSYour Name 	htt_tx_frag_desc_field_update(pdev, fragmentation_descr_field_ptr,
604*5113495bSYour Name 				      index, htt_tx_desc);
605*5113495bSYour Name 
606*5113495bSYour Name 		return;
607*5113495bSYour Name }
608*5113495bSYour Name 
/**
 * htt_tx_desc_frags_table_set() - Set or reset the frags-table pointer
 *                                 inside an HTT tx descriptor.
 * @pdev: HTT pdev instance
 * @htt_tx_desc: HTT tx descriptor to update
 * @paddr: physical address to program when not resetting
 * @frag_desc_paddr: frag descriptor paddr used on reset (HELIUMPLUS only)
 * @reset: non-zero to restore the default frag descriptor address
 */
void htt_tx_desc_frags_table_set(htt_pdev_handle pdev,
				 void *htt_tx_desc,
				 qdf_dma_addr_t paddr,
				 qdf_dma_addr_t frag_desc_paddr,
				 int reset)
{
	uint32_t *frags_desc_field;

	/* address of the frags-desc-paddr dword inside the descriptor */
	frags_desc_field = (uint32_t *)((uint32_t *)htt_tx_desc) +
			   HTT_TX_DESC_FRAGS_DESC_PADDR_OFFSET_DWORD;

	if (!reset) {
		*frags_desc_field = paddr;
		return;
	}

#if defined(HELIUMPLUS)
	*frags_desc_field = frag_desc_paddr;
#else
	/* default frag table lives immediately after the tx descriptor */
	*frags_desc_field =
		htt_tx_get_paddr(pdev, htt_tx_desc) + HTT_TX_DESC_LEN;
#endif
}
631*5113495bSYour Name 
/**
 * htt_tx_pending_discard() - Discard tx packets pending in HTC.
 * @pdev: HTT pdev instance
 *
 * Thin wrapper around htc_flush_surprise_remove(); used to drop
 * queued tx traffic (e.g. on surprise removal, per the HTC call name).
 */
void htt_tx_pending_discard(htt_pdev_handle pdev)
{
	htc_flush_surprise_remove(pdev->htc_pdev);
}
636*5113495bSYour Name 
htt_tx_get_paddr(htt_pdev_handle pdev,char * target_vaddr)637*5113495bSYour Name static qdf_dma_addr_t htt_tx_get_paddr(htt_pdev_handle pdev,
638*5113495bSYour Name 				char *target_vaddr)
639*5113495bSYour Name {
640*5113495bSYour Name 	uint16_t i;
641*5113495bSYour Name 	struct qdf_mem_dma_page_t *page_info = NULL;
642*5113495bSYour Name 	uint64_t offset;
643*5113495bSYour Name 
644*5113495bSYour Name 	for (i = 0; i < pdev->tx_descs.desc_pages.num_pages; i++) {
645*5113495bSYour Name 		page_info = pdev->tx_descs.desc_pages.dma_pages + i;
646*5113495bSYour Name 		if (!page_info->page_v_addr_start) {
647*5113495bSYour Name 			qdf_assert(0);
648*5113495bSYour Name 			return 0;
649*5113495bSYour Name 		}
650*5113495bSYour Name 		if ((target_vaddr >= page_info->page_v_addr_start) &&
651*5113495bSYour Name 			(target_vaddr <= page_info->page_v_addr_end))
652*5113495bSYour Name 			break;
653*5113495bSYour Name 	}
654*5113495bSYour Name 
655*5113495bSYour Name 	if (!page_info) {
656*5113495bSYour Name 		ol_txrx_err("invalid page_info");
657*5113495bSYour Name 		return 0;
658*5113495bSYour Name 	}
659*5113495bSYour Name 
660*5113495bSYour Name 	offset = (uint64_t)(target_vaddr - page_info->page_v_addr_start);
661*5113495bSYour Name 	return page_info->page_p_addr + offset;
662*5113495bSYour Name }
663*5113495bSYour Name 
664*5113495bSYour Name #endif
665*5113495bSYour Name 
666*5113495bSYour Name /*--- descriptor allocation functions ---------------------------------------*/
667*5113495bSYour Name 
/**
 * htt_tx_desc_alloc() - Pop an HTT tx descriptor from the freelist.
 * @pdev: HTT pdev owning the descriptor pool
 * @paddr: filled with the physical address of the full host descriptor
 *         (including the HTC header headroom)
 * @index: descriptor index, used to program the frag descriptor address
 *
 * Return: pointer to the HTT tx descriptor (past the HTC headroom),
 *         or NULL if the pool is exhausted
 */
void *htt_tx_desc_alloc(htt_pdev_handle pdev, qdf_dma_addr_t *paddr,
			uint16_t index)
{
	struct htt_host_tx_desc_t *host_desc;	/* includes HTC hdr */
	struct htt_tx_msdu_desc_t *htt_tx_desc;	/* excludes HTC hdr */

	host_desc = (struct htt_host_tx_desc_t *)pdev->tx_descs.freelist;
	if (!host_desc)
		return NULL;	/* pool is exhausted */

	htt_tx_desc = &host_desc->align32.tx_desc;

	/* unlink the descriptor from the freelist */
	pdev->tx_descs.freelist = *((uint32_t **)pdev->tx_descs.freelist);
	pdev->tx_descs.alloc_cnt++;

	/*
	 * For LL, set up the fragmentation descriptor address.
	 * Currently, this HTT tx desc allocation is performed once up front.
	 * If this is changed to have the allocation done during tx, then it
	 * would be helpful to have separate htt_tx_desc_alloc functions for
	 * HL vs. LL, to remove the below conditional branch.
	 */
	htt_tx_set_frag_desc_addr(pdev, htt_tx_desc, index);

	/*
	 * The reported physical address covers the HTC frame header
	 * headroom as well, since that headroom is downloaded together
	 * with the descriptor.
	 */
	*paddr = (qdf_dma_addr_t)htt_tx_get_paddr(pdev, (char *)host_desc);

	/*
	 * Hand back the address past the HTC headroom, so callers can
	 * program descriptor fields without jumping over the headroom;
	 * the headroom is only relevant when downloading via HTC.
	 */
	return (void *)htt_tx_desc;
}
710*5113495bSYour Name 
/**
 * htt_tx_desc_free() - Return an HTT tx descriptor to the freelist.
 * @pdev: HTT pdev owning the descriptor pool
 * @tx_desc: descriptor previously returned by htt_tx_desc_alloc()
 */
void htt_tx_desc_free(htt_pdev_handle pdev, void *tx_desc)
{
	/* rewind over the HTC frame header space */
	char *host_desc = (char *)tx_desc -
		offsetof(struct htt_host_tx_desc_t, align32.tx_desc);

	/* push onto the head of the freelist */
	*((uint32_t **)host_desc) = pdev->tx_descs.freelist;
	pdev->tx_descs.freelist = (uint32_t *)host_desc;
	pdev->tx_descs.alloc_cnt--;
}
721*5113495bSYour Name 
722*5113495bSYour Name /*--- descriptor field access methods ---------------------------------------*/
723*5113495bSYour Name 
724*5113495bSYour Name /* PUT THESE AS inline IN ol_htt_tx_api.h */
725*5113495bSYour Name 
/* Intentionally empty: no postponed-flag handling in this build. */
void htt_tx_desc_flag_postponed(htt_pdev_handle pdev, void *desc)
{
}
729*5113495bSYour Name 
/* Intentionally empty: no batch-more flag handling in this build. */
void htt_tx_desc_flag_batch_more(htt_pdev_handle pdev, void *desc)
{
}
733*5113495bSYour Name 
734*5113495bSYour Name /*--- tx send function ------------------------------------------------------*/
735*5113495bSYour Name 
736*5113495bSYour Name #ifdef ATH_11AC_TXCOMPACT
737*5113495bSYour Name 
738*5113495bSYour Name /*
739*5113495bSYour Name  * Scheduling the Queued packets in HTT which could not be sent out
740*5113495bSYour Name  * because of No CE desc
741*5113495bSYour Name  */
void htt_tx_sched(htt_pdev_handle pdev)
{
	int download_len = pdev->download_len;
	int packet_len;
	qdf_nbuf_t msdu;

	HTT_TX_NBUF_QUEUE_REMOVE(pdev, msdu);
	while (msdu) {
		/* packet length includes HTT tx desc frag added above */
		packet_len = qdf_nbuf_len(msdu);

		/*
		 * The frame can legitimately be shorter than the nominal
		 * download length: in HL the nominal length is a large
		 * artificial value, and in LL the frame may lack the
		 * optional header fields (LLC/SNAP, IPv4/IPv6 headers)
		 * that the nominal size accounts for.
		 */
		if (packet_len < download_len)
			download_len = packet_len;

		if (htc_send_data_pkt(pdev->htc_pdev, msdu,
				      pdev->htc_tx_endpoint,
				      download_len)) {
			/* HTC rejected it; requeue at head and stop */
			HTT_TX_NBUF_QUEUE_INSERT_HEAD(pdev, msdu);
			return;
		}

		HTT_TX_NBUF_QUEUE_REMOVE(pdev, msdu);
	}
}
778*5113495bSYour Name 
/**
 * htt_tx_send_std() - Hand a standard tx frame to HTC.
 * @pdev: HTT pdev instance
 * @msdu: frame to send (HTT tx descriptor already prefixed as a fragment)
 * @msdu_id: tx descriptor id (unused in this TXCOMPACT variant)
 *
 * If other frames are already queued, this frame joins the queue and the
 * scheduler drains it; otherwise the frame is pushed directly to HTC and
 * queued only if HTC rejects it.
 *
 * Return: 0 always (frame is either sent or queued)
 */
int htt_tx_send_std(htt_pdev_handle pdev, qdf_nbuf_t msdu, uint16_t msdu_id)
{
	int download_len = pdev->download_len;
	/* packet length includes HTT tx desc frag added above */
	int packet_len = qdf_nbuf_len(msdu);

	/*
	 * The frame can be shorter than the nominal download length:
	 * in HL the nominal length is a large artificial value; in LL
	 * the frame may lack the optional header fields (LLC/SNAP,
	 * IPv4/IPv6) the nominal size accounts for.
	 */
	if (packet_len < download_len)
		download_len = packet_len;

	/* extension descriptor adds to the download size */
	if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(msdu))
		download_len += sizeof(struct htt_tx_msdu_desc_ext_t);

	QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_HTT);
	DPTRACE(qdf_dp_trace(msdu, QDF_DP_TRACE_HTT_PACKET_PTR_RECORD,
				QDF_TRACE_DEFAULT_PDEV_ID,
				qdf_nbuf_data_addr(msdu),
				sizeof(qdf_nbuf_data(msdu)), QDF_TX));

	if (qdf_nbuf_queue_len(&pdev->txnbufq) > 0) {
		/* maintain ordering behind already-queued frames */
		HTT_TX_NBUF_QUEUE_ADD(pdev, msdu);
		htt_tx_sched(pdev);
		return 0;
	}

	if (htc_send_data_pkt(pdev->htc_pdev, msdu,
			      pdev->htc_tx_endpoint, download_len))
		HTT_TX_NBUF_QUEUE_ADD(pdev, msdu);

	return 0;               /* success */
}
824*5113495bSYour Name 
825*5113495bSYour Name #ifndef CONFIG_HL_SUPPORT
826*5113495bSYour Name #ifdef FEATURE_RUNTIME_PM
827*5113495bSYour Name /**
828*5113495bSYour Name  * htt_tx_resume_handler() - resume callback for the htt endpoint
829*5113495bSYour Name  * @context: a pointer to the htt context
830*5113495bSYour Name  *
831*5113495bSYour Name  * runs htt_tx_sched.
832*5113495bSYour Name  */
/* Resume callback for the htt endpoint: kick the tx scheduler. */
void htt_tx_resume_handler(void *context)
{
	htt_tx_sched((struct htt_pdev_t *)context);
}
839*5113495bSYour Name #else
/* Stub: FEATURE_RUNTIME_PM disabled, nothing to do on resume. */
void
htt_tx_resume_handler(void *context) { }
842*5113495bSYour Name #endif
843*5113495bSYour Name #endif
844*5113495bSYour Name 
845*5113495bSYour Name qdf_nbuf_t
htt_tx_send_batch(htt_pdev_handle pdev,qdf_nbuf_t head_msdu,int num_msdus)846*5113495bSYour Name htt_tx_send_batch(htt_pdev_handle pdev, qdf_nbuf_t head_msdu, int num_msdus)
847*5113495bSYour Name {
848*5113495bSYour Name 	qdf_print("Not apply to LL");
849*5113495bSYour Name 	qdf_assert(0);
850*5113495bSYour Name 	return head_msdu;
851*5113495bSYour Name 
852*5113495bSYour Name }
853*5113495bSYour Name 
854*5113495bSYour Name int
htt_tx_send_nonstd(htt_pdev_handle pdev,qdf_nbuf_t msdu,uint16_t msdu_id,enum htt_pkt_type pkt_type)855*5113495bSYour Name htt_tx_send_nonstd(htt_pdev_handle pdev,
856*5113495bSYour Name 		   qdf_nbuf_t msdu,
857*5113495bSYour Name 		   uint16_t msdu_id, enum htt_pkt_type pkt_type)
858*5113495bSYour Name {
859*5113495bSYour Name 	int download_len;
860*5113495bSYour Name 
861*5113495bSYour Name 	/*
862*5113495bSYour Name 	 * The pkt_type could be checked to see what L2 header type is present,
863*5113495bSYour Name 	 * and then the L2 header could be examined to determine its length.
864*5113495bSYour Name 	 * But for simplicity, just use the maximum possible header size,
865*5113495bSYour Name 	 * rather than computing the actual header size.
866*5113495bSYour Name 	 */
867*5113495bSYour Name 	download_len = sizeof(struct htt_host_tx_desc_t)
868*5113495bSYour Name 		+ HTT_TX_HDR_SIZE_OUTER_HDR_MAX /* worst case */
869*5113495bSYour Name 		+ HTT_TX_HDR_SIZE_802_1Q
870*5113495bSYour Name 		+ HTT_TX_HDR_SIZE_LLC_SNAP
871*5113495bSYour Name 		+ ol_cfg_tx_download_size(pdev->ctrl_pdev);
872*5113495bSYour Name 	qdf_assert(download_len <= pdev->download_len);
873*5113495bSYour Name 	return htt_tx_send_std(pdev, msdu, msdu_id);
874*5113495bSYour Name }
875*5113495bSYour Name 
876*5113495bSYour Name #ifndef QCA_TX_PADDING_CREDIT_SUPPORT
/* Stub: padding-credit accounting is compiled out; always report 1. */
int htt_tx_padding_credit_update_handler(void *context, int pad_credit)
{
	return 1;
}
881*5113495bSYour Name #endif
882*5113495bSYour Name 
883*5113495bSYour Name #else                           /*ATH_11AC_TXCOMPACT */
884*5113495bSYour Name 
885*5113495bSYour Name #ifdef QCA_TX_PADDING_CREDIT_SUPPORT
/**
 * htt_tx_padding_credit_update() - Adjust and read the reserved padding
 *                                  tx credit counter.
 * @htt_pdev: HTT pdev instance
 * @pad_credit: delta to add (no-op when 0)
 *
 * Return: current value of the pad-reserve tx credit counter
 */
static int htt_tx_padding_credit_update(htt_pdev_handle htt_pdev,
					int pad_credit)
{
	qdf_atomic_t *credit =
		&htt_pdev->txrx_pdev->pad_reserve_tx_credit;

	if (pad_credit)
		qdf_atomic_add(pad_credit, credit);

	return qdf_atomic_read(credit);
}
899*5113495bSYour Name 
/* HTC-facing wrapper: forward the padding credit delta to the pdev. */
int htt_tx_padding_credit_update_handler(void *context, int pad_credit)
{
	return htt_tx_padding_credit_update((struct htt_pdev_t *)context,
					    pad_credit);
}
906*5113495bSYour Name #else
/* Stub: QCA_TX_PADDING_CREDIT_SUPPORT disabled; always report 1. */
int htt_tx_padding_credit_update_handler(void *context, int pad_credit)
{
	return 1;
}
911*5113495bSYour Name #endif
912*5113495bSYour Name 
913*5113495bSYour Name #ifdef QCA_TX_HTT2_SUPPORT
914*5113495bSYour Name static inline HTC_ENDPOINT_ID
htt_tx_htt2_get_ep_id(htt_pdev_handle pdev,qdf_nbuf_t msdu)915*5113495bSYour Name htt_tx_htt2_get_ep_id(htt_pdev_handle pdev, qdf_nbuf_t msdu)
916*5113495bSYour Name {
917*5113495bSYour Name 	/*
918*5113495bSYour Name 	 * TX HTT2 service mainly for small sized frame and check if
919*5113495bSYour Name 	 * this candidate frame allow or not.
920*5113495bSYour Name 	 */
921*5113495bSYour Name 	if ((pdev->htc_tx_htt2_endpoint != ENDPOINT_UNUSED) &&
922*5113495bSYour Name 	    qdf_nbuf_get_tx_parallel_dnload_frm(msdu) &&
923*5113495bSYour Name 	    (qdf_nbuf_len(msdu) < pdev->htc_tx_htt2_max_size))
924*5113495bSYour Name 		return pdev->htc_tx_htt2_endpoint;
925*5113495bSYour Name 	else
926*5113495bSYour Name 		return pdev->htc_tx_endpoint;
927*5113495bSYour Name }
928*5113495bSYour Name #else
929*5113495bSYour Name #define htt_tx_htt2_get_ep_id(pdev, msdu)     (pdev->htc_tx_endpoint)
930*5113495bSYour Name #endif /* QCA_TX_HTT2_SUPPORT */
931*5113495bSYour Name 
932*5113495bSYour Name static inline int
htt_tx_send_base(htt_pdev_handle pdev,qdf_nbuf_t msdu,uint16_t msdu_id,int download_len,uint8_t more_data)933*5113495bSYour Name htt_tx_send_base(htt_pdev_handle pdev,
934*5113495bSYour Name 		 qdf_nbuf_t msdu,
935*5113495bSYour Name 		 uint16_t msdu_id, int download_len, uint8_t more_data)
936*5113495bSYour Name {
937*5113495bSYour Name 	struct htt_host_tx_desc_t *htt_host_tx_desc;
938*5113495bSYour Name 	struct htt_htc_pkt *pkt;
939*5113495bSYour Name 	int packet_len;
940*5113495bSYour Name 	HTC_ENDPOINT_ID ep_id;
941*5113495bSYour Name 
942*5113495bSYour Name 	/*
943*5113495bSYour Name 	 * The HTT tx descriptor was attached as the prefix fragment to the
944*5113495bSYour Name 	 * msdu netbuf during the call to htt_tx_desc_init.
945*5113495bSYour Name 	 * Retrieve it so we can provide its HTC header space to HTC.
946*5113495bSYour Name 	 */
947*5113495bSYour Name 	htt_host_tx_desc = (struct htt_host_tx_desc_t *)
948*5113495bSYour Name 			   qdf_nbuf_get_frag_vaddr(msdu, 0);
949*5113495bSYour Name 
950*5113495bSYour Name 	pkt = htt_htc_pkt_alloc(pdev);
951*5113495bSYour Name 	if (!pkt)
952*5113495bSYour Name 		return -ENOBUFS;       /* failure */
953*5113495bSYour Name 
954*5113495bSYour Name 	pkt->msdu_id = msdu_id;
955*5113495bSYour Name 	pkt->pdev_ctxt = pdev->txrx_pdev;
956*5113495bSYour Name 
957*5113495bSYour Name 	/* packet length includes HTT tx desc frag added above */
958*5113495bSYour Name 	packet_len = qdf_nbuf_len(msdu);
959*5113495bSYour Name 	if (packet_len < download_len) {
960*5113495bSYour Name 		/*
961*5113495bSYour Name 		 * This case of packet length being less than the nominal
962*5113495bSYour Name 		 * download length can happen for a couple reasons:
963*5113495bSYour Name 		 * In HL, the nominal download length is a large artificial
964*5113495bSYour Name 		 * value.
965*5113495bSYour Name 		 * In LL, the frame may not have the optional header fields
966*5113495bSYour Name 		 * accounted for in the nominal download size (LLC/SNAP header,
967*5113495bSYour Name 		 * IPv4 or IPv6 header).
968*5113495bSYour Name 		 */
969*5113495bSYour Name 		download_len = packet_len;
970*5113495bSYour Name 	}
971*5113495bSYour Name 
972*5113495bSYour Name 	ep_id = htt_tx_htt2_get_ep_id(pdev, msdu);
973*5113495bSYour Name 
974*5113495bSYour Name 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
975*5113495bSYour Name 			       pdev->tx_send_complete_part2,
976*5113495bSYour Name 			       (unsigned char *)htt_host_tx_desc,
977*5113495bSYour Name 			       download_len - HTC_HDR_LENGTH,
978*5113495bSYour Name 			       ep_id,
979*5113495bSYour Name 			       1); /* tag - not relevant here */
980*5113495bSYour Name 
981*5113495bSYour Name 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msdu);
982*5113495bSYour Name 
983*5113495bSYour Name 	QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_HTT);
984*5113495bSYour Name 	DPTRACE(qdf_dp_trace(msdu, QDF_DP_TRACE_HTT_PACKET_PTR_RECORD,
985*5113495bSYour Name 				QDF_TRACE_DEFAULT_PDEV_ID,
986*5113495bSYour Name 				qdf_nbuf_data_addr(msdu),
987*5113495bSYour Name 				sizeof(qdf_nbuf_data(msdu)), QDF_TX));
988*5113495bSYour Name 	htc_send_data_pkt(pdev->htc_pdev, &pkt->htc_pkt, more_data);
989*5113495bSYour Name 
990*5113495bSYour Name 	return 0;               /* success */
991*5113495bSYour Name }
992*5113495bSYour Name 
993*5113495bSYour Name qdf_nbuf_t
htt_tx_send_batch(htt_pdev_handle pdev,qdf_nbuf_t head_msdu,int num_msdus)994*5113495bSYour Name htt_tx_send_batch(htt_pdev_handle pdev, qdf_nbuf_t head_msdu, int num_msdus)
995*5113495bSYour Name {
996*5113495bSYour Name 	qdf_nbuf_t rejected = NULL;
997*5113495bSYour Name 	uint16_t *msdu_id_storage;
998*5113495bSYour Name 	uint16_t msdu_id;
999*5113495bSYour Name 	qdf_nbuf_t msdu;
1000*5113495bSYour Name 
1001*5113495bSYour Name 	/*
1002*5113495bSYour Name 	 * FOR NOW, iterate through the batch, sending the frames singly.
1003*5113495bSYour Name 	 * Eventually HTC and HIF should be able to accept a batch of
1004*5113495bSYour Name 	 * data frames rather than singles.
1005*5113495bSYour Name 	 */
1006*5113495bSYour Name 	msdu = head_msdu;
1007*5113495bSYour Name 	while (num_msdus--) {
1008*5113495bSYour Name 		qdf_nbuf_t next_msdu = qdf_nbuf_next(msdu);
1009*5113495bSYour Name 
1010*5113495bSYour Name 		msdu_id_storage = ol_tx_msdu_id_storage(msdu);
1011*5113495bSYour Name 		msdu_id = *msdu_id_storage;
1012*5113495bSYour Name 
1013*5113495bSYour Name 		/* htt_tx_send_base returns 0 as success and 1 as failure */
1014*5113495bSYour Name 		if (htt_tx_send_base(pdev, msdu, msdu_id, pdev->download_len,
1015*5113495bSYour Name 				     num_msdus)) {
1016*5113495bSYour Name 			qdf_nbuf_set_next(msdu, rejected);
1017*5113495bSYour Name 			rejected = msdu;
1018*5113495bSYour Name 		}
1019*5113495bSYour Name 		msdu = next_msdu;
1020*5113495bSYour Name 	}
1021*5113495bSYour Name 	return rejected;
1022*5113495bSYour Name }
1023*5113495bSYour Name 
1024*5113495bSYour Name int
htt_tx_send_nonstd(htt_pdev_handle pdev,qdf_nbuf_t msdu,uint16_t msdu_id,enum htt_pkt_type pkt_type)1025*5113495bSYour Name htt_tx_send_nonstd(htt_pdev_handle pdev,
1026*5113495bSYour Name 		   qdf_nbuf_t msdu,
1027*5113495bSYour Name 		   uint16_t msdu_id, enum htt_pkt_type pkt_type)
1028*5113495bSYour Name {
1029*5113495bSYour Name 	int download_len;
1030*5113495bSYour Name 
1031*5113495bSYour Name 	/*
1032*5113495bSYour Name 	 * The pkt_type could be checked to see what L2 header type is present,
1033*5113495bSYour Name 	 * and then the L2 header could be examined to determine its length.
1034*5113495bSYour Name 	 * But for simplicity, just use the maximum possible header size,
1035*5113495bSYour Name 	 * rather than computing the actual header size.
1036*5113495bSYour Name 	 */
1037*5113495bSYour Name 	download_len = sizeof(struct htt_host_tx_desc_t)
1038*5113495bSYour Name 		+ HTT_TX_HDR_SIZE_OUTER_HDR_MAX      /* worst case */
1039*5113495bSYour Name 		+ HTT_TX_HDR_SIZE_802_1Q
1040*5113495bSYour Name 		+ HTT_TX_HDR_SIZE_LLC_SNAP
1041*5113495bSYour Name 		+ ol_cfg_tx_download_size(pdev->ctrl_pdev);
1042*5113495bSYour Name 	return htt_tx_send_base(pdev, msdu, msdu_id, download_len, 0);
1043*5113495bSYour Name }
1044*5113495bSYour Name 
/* Send a standard frame using the pdev's nominal download length. */
int htt_tx_send_std(htt_pdev_handle pdev, qdf_nbuf_t msdu, uint16_t msdu_id)
{
	return htt_tx_send_base(pdev, msdu, msdu_id, pdev->download_len, 0);
}
1049*5113495bSYour Name 
1050*5113495bSYour Name #endif /*ATH_11AC_TXCOMPACT */
1051*5113495bSYour Name 
1052*5113495bSYour Name #if defined(HTT_DBG)
/**
 * htt_tx_desc_display() - Dump every field of an HTT tx descriptor.
 * @tx_desc: HTT tx MSDU descriptor to dump
 *
 * Debug-only (compiled under HTT_DBG). Field-by-field dump via
 * qdf_debug; per the note below, the bitfield layout read here is
 * only valid on little-endian hosts.
 */
void htt_tx_desc_display(void *tx_desc)
{
	struct htt_tx_msdu_desc_t *htt_tx_desc;

	htt_tx_desc = (struct htt_tx_msdu_desc_t *)tx_desc;

	/* only works for little-endian */
	qdf_debug("HTT tx desc (@ %pK):", htt_tx_desc);
	qdf_debug("  msg type = %d", htt_tx_desc->msg_type);
	qdf_debug("  pkt subtype = %d", htt_tx_desc->pkt_subtype);
	qdf_debug("  pkt type = %d", htt_tx_desc->pkt_type);
	qdf_debug("  vdev ID = %d", htt_tx_desc->vdev_id);
	qdf_debug("  ext TID = %d", htt_tx_desc->ext_tid);
	qdf_debug("  postponed = %d", htt_tx_desc->postponed);
	qdf_debug("  extension = %d", htt_tx_desc->extension);
	qdf_debug("  cksum_offload = %d", htt_tx_desc->cksum_offload);
	qdf_debug("  tx_compl_req= %d", htt_tx_desc->tx_compl_req);
	qdf_debug("  length = %d", htt_tx_desc->len);
	qdf_debug("  id = %d", htt_tx_desc->id);
#if HTT_PADDR64
	qdf_debug("  frag desc addr.lo = %#x",
		  htt_tx_desc->frags_desc_ptr.lo);
	qdf_debug("  frag desc addr.hi = %#x",
		  htt_tx_desc->frags_desc_ptr.hi);
#else /* ! HTT_PADDR64 */
	qdf_debug("  frag desc addr = %#x", htt_tx_desc->frags_desc_ptr);
#endif /* HTT_PADDR64 */
	qdf_debug("  peerid = %d", htt_tx_desc->peerid);
	qdf_debug("  chanfreq = %d", htt_tx_desc->chanfreq);
}
1083*5113495bSYour Name #endif
1084*5113495bSYour Name 
1085*5113495bSYour Name #ifdef IPA_OFFLOAD
1086*5113495bSYour Name #ifdef QCA_WIFI_3_0
1087*5113495bSYour Name 
#ifndef LIMIT_IPA_TX_BUFFER
#define LIMIT_IPA_TX_BUFFER 2048
#endif

/**
 * htt_tx_ipa_get_limit_tx_buf_count() - Cap the WDI TX buffer count
 * @uc_tx_buf_cnt: requested TX buffer count
 *
 * Fix vs. original: the kernel-doc named the wrong function
 * ("htt_tx_ipa_get_tx_buf_count").
 *
 * Return: @uc_tx_buf_cnt, capped at LIMIT_IPA_TX_BUFFER
 */
static int htt_tx_ipa_get_limit_tx_buf_count(unsigned int uc_tx_buf_cnt)
{
	/*
	 * In order to improve the Genoa IPA DBS KPI, need to set
	 * IpaUcTxBufCount=2048, so tx complete ring size=2048, and
	 * total tx buffer count = 2047.
	 * But in fact, wlan fw just only have 5G 1100 tx desc +
	 * 2.4G 400 desc, it can cover about 1500 packets from
	 * IPA side.
	 * So the remaining 2047-1500 packet are not used,
	 * in order to save some memory, so we can use
	 * LIMIT_IPA_TX_BUFFER to limit the max tx buffer
	 * count, which varied from platform.
	 * And then the tx buffer count always equal to tx complete
	 * ring size -1 is not mandatory now.
	 * From the trying, it has the same KPI achievement while
	 * set LIMIT_IPA_TX_BUFFER=1500 or 2048.
	 */
	if (uc_tx_buf_cnt > LIMIT_IPA_TX_BUFFER)
		return LIMIT_IPA_TX_BUFFER;

	return uc_tx_buf_cnt;
}
1120*5113495bSYour Name 
1121*5113495bSYour Name /**
1122*5113495bSYour Name  * htt_tx_ipa_uc_wdi_tx_buf_alloc() - Alloc WDI TX buffers
1123*5113495bSYour Name  * @pdev: htt context
1124*5113495bSYour Name  * @uc_tx_buf_sz: TX buffer size
1125*5113495bSYour Name  * @uc_tx_buf_cnt: TX Buffer count
1126*5113495bSYour Name  * @uc_tx_partition_base: IPA UC TX partition base value
1127*5113495bSYour Name  *
1128*5113495bSYour Name  * Allocate WDI TX buffers. Also note Rome supports only WDI 1.0.
1129*5113495bSYour Name  *
1130*5113495bSYour Name  * Return: 0 success
1131*5113495bSYour Name  */
1132*5113495bSYour Name 
htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t * pdev,unsigned int uc_tx_buf_sz,unsigned int uc_tx_buf_cnt,unsigned int uc_tx_partition_base)1133*5113495bSYour Name static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
1134*5113495bSYour Name 					  unsigned int uc_tx_buf_sz,
1135*5113495bSYour Name 					  unsigned int uc_tx_buf_cnt,
1136*5113495bSYour Name 					  unsigned int uc_tx_partition_base)
1137*5113495bSYour Name {
1138*5113495bSYour Name 	unsigned int tx_buffer_count;
1139*5113495bSYour Name 	qdf_dma_addr_t buffer_paddr;
1140*5113495bSYour Name 	uint32_t *header_ptr;
1141*5113495bSYour Name 	target_paddr_t *ring_vaddr;
1142*5113495bSYour Name 	qdf_shared_mem_t *shared_tx_buffer;
1143*5113495bSYour Name 
1144*5113495bSYour Name 	ring_vaddr = (target_paddr_t *)pdev->ipa_uc_tx_rsc.tx_comp_ring->vaddr;
1145*5113495bSYour Name 
1146*5113495bSYour Name 	/* Allocate TX buffers as many as possible */
1147*5113495bSYour Name 	for (tx_buffer_count = 0;
1148*5113495bSYour Name 	     tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) {
1149*5113495bSYour Name 
1150*5113495bSYour Name 		shared_tx_buffer = qdf_mem_shared_mem_alloc(pdev->osdev,
1151*5113495bSYour Name 							    uc_tx_buf_sz);
1152*5113495bSYour Name 		if (!shared_tx_buffer || !shared_tx_buffer->vaddr) {
1153*5113495bSYour Name 			qdf_print("IPA WDI TX buffer alloc fail %d allocated",
1154*5113495bSYour Name 				tx_buffer_count);
1155*5113495bSYour Name 			goto out;
1156*5113495bSYour Name 		}
1157*5113495bSYour Name 
1158*5113495bSYour Name 		header_ptr = shared_tx_buffer->vaddr;
1159*5113495bSYour Name 		buffer_paddr = qdf_mem_get_dma_addr(pdev->osdev,
1160*5113495bSYour Name 						&shared_tx_buffer->mem_info);
1161*5113495bSYour Name 
1162*5113495bSYour Name 		/* HTT control header */
1163*5113495bSYour Name 		*header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
1164*5113495bSYour Name 		header_ptr++;
1165*5113495bSYour Name 
1166*5113495bSYour Name 		/* PKT ID */
1167*5113495bSYour Name 		*header_ptr |= ((uint16_t) uc_tx_partition_base +
1168*5113495bSYour Name 				tx_buffer_count) << 16;
1169*5113495bSYour Name 
1170*5113495bSYour Name 		header_ptr++;
1171*5113495bSYour Name 
1172*5113495bSYour Name 		/* Frag Desc Pointer */
1173*5113495bSYour Name 		/* 64bits descriptor, Low 32bits */
1174*5113495bSYour Name 		*header_ptr = qdf_get_lower_32_bits(buffer_paddr +
1175*5113495bSYour Name 					IPA_UC_TX_BUF_FRAG_DESC_OFFSET);
1176*5113495bSYour Name 		header_ptr++;
1177*5113495bSYour Name 
1178*5113495bSYour Name 		/* 64bits descriptor, high 32bits */
1179*5113495bSYour Name 		*header_ptr = qdf_get_upper_32_bits(buffer_paddr) &
1180*5113495bSYour Name 			IPA_UC_TX_BUF_PADDR_HI_MASK;
1181*5113495bSYour Name 		header_ptr++;
1182*5113495bSYour Name 
1183*5113495bSYour Name 		/* chanreq, peerid */
1184*5113495bSYour Name 		*header_ptr = 0xFFFFFFFF;
1185*5113495bSYour Name 		header_ptr++;
1186*5113495bSYour Name 
1187*5113495bSYour Name 		/* FRAG Header */
1188*5113495bSYour Name 		/* 6 words TSO header */
1189*5113495bSYour Name 		header_ptr += IPA_UC_TX_BUF_TSO_HDR_SIZE;
1190*5113495bSYour Name 		*header_ptr = buffer_paddr + IPA_UC_TX_BUF_FRAG_HDR_OFFSET;
1191*5113495bSYour Name 
1192*5113495bSYour Name 		*ring_vaddr = buffer_paddr;
1193*5113495bSYour Name 		pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[tx_buffer_count] =
1194*5113495bSYour Name 			shared_tx_buffer;
1195*5113495bSYour Name 
1196*5113495bSYour Name 		/* Memory barrier to ensure actual value updated */
1197*5113495bSYour Name 
1198*5113495bSYour Name 		ring_vaddr++;
1199*5113495bSYour Name 	}
1200*5113495bSYour Name 
1201*5113495bSYour Name out:
1202*5113495bSYour Name 
1203*5113495bSYour Name 	return tx_buffer_count;
1204*5113495bSYour Name }
1205*5113495bSYour Name 
1206*5113495bSYour Name /**
1207*5113495bSYour Name  * htt_tx_buf_pool_free() - Free tx buffer pool
1208*5113495bSYour Name  * @pdev: htt context
1209*5113495bSYour Name  *
1210*5113495bSYour Name  * Free memory in tx buffer pool
1211*5113495bSYour Name  *
1212*5113495bSYour Name  * Return: 0 success
1213*5113495bSYour Name  */
htt_tx_buf_pool_free(struct htt_pdev_t * pdev)1214*5113495bSYour Name static void htt_tx_buf_pool_free(struct htt_pdev_t *pdev)
1215*5113495bSYour Name {
1216*5113495bSYour Name 	uint16_t idx;
1217*5113495bSYour Name 
1218*5113495bSYour Name 	for (idx = 0; idx < pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
1219*5113495bSYour Name 		if (pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx]) {
1220*5113495bSYour Name 			qdf_mem_shared_mem_free(pdev->osdev,
1221*5113495bSYour Name 						pdev->ipa_uc_tx_rsc.
1222*5113495bSYour Name 							tx_buf_pool_strg[idx]);
1223*5113495bSYour Name 			pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx] = NULL;
1224*5113495bSYour Name 		}
1225*5113495bSYour Name 	}
1226*5113495bSYour Name }
1227*5113495bSYour Name #else
/**
 * htt_tx_ipa_get_limit_tx_buf_count() - get allowed TX buffer count
 * @uc_tx_buf_cnt: requested TX buffer count
 *
 * Non-WDI2 build: no platform-specific cap applies, so the requested
 * count is passed through unchanged.
 *
 * Return: allowed TX buffer count
 */
static int htt_tx_ipa_get_limit_tx_buf_count(unsigned int uc_tx_buf_cnt)
{
	/* Identity: this platform imposes no limit */
	return uc_tx_buf_cnt;
}
1232*5113495bSYour Name 
htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t * pdev,unsigned int uc_tx_buf_sz,unsigned int uc_tx_buf_cnt,unsigned int uc_tx_partition_base)1233*5113495bSYour Name static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
1234*5113495bSYour Name 					  unsigned int uc_tx_buf_sz,
1235*5113495bSYour Name 					  unsigned int uc_tx_buf_cnt,
1236*5113495bSYour Name 					  unsigned int uc_tx_partition_base)
1237*5113495bSYour Name {
1238*5113495bSYour Name 	unsigned int tx_buffer_count;
1239*5113495bSYour Name 	unsigned int  tx_buffer_count_pwr2;
1240*5113495bSYour Name 	qdf_dma_addr_t buffer_paddr;
1241*5113495bSYour Name 	uint32_t *header_ptr;
1242*5113495bSYour Name 	uint32_t *ring_vaddr;
1243*5113495bSYour Name 	uint16_t idx;
1244*5113495bSYour Name 	qdf_shared_mem_t *shared_tx_buffer;
1245*5113495bSYour Name 
1246*5113495bSYour Name 	ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_ring->vaddr;
1247*5113495bSYour Name 
1248*5113495bSYour Name 	/* Allocate TX buffers as many as possible */
1249*5113495bSYour Name 	for (tx_buffer_count = 0;
1250*5113495bSYour Name 	     tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) {
1251*5113495bSYour Name 		shared_tx_buffer = qdf_mem_shared_mem_alloc(pdev->osdev,
1252*5113495bSYour Name 							    uc_tx_buf_sz);
1253*5113495bSYour Name 		if (!shared_tx_buffer || !shared_tx_buffer->vaddr) {
1254*5113495bSYour Name 			qdf_print("TX BUF alloc fail, loop index: %d",
1255*5113495bSYour Name 				  tx_buffer_count);
1256*5113495bSYour Name 			goto pwr2;
1257*5113495bSYour Name 		}
1258*5113495bSYour Name 
1259*5113495bSYour Name 		/* Init buffer */
1260*5113495bSYour Name 		qdf_mem_zero(shared_tx_buffer->vaddr, uc_tx_buf_sz);
1261*5113495bSYour Name 		header_ptr = (uint32_t *)shared_tx_buffer->vaddr;
1262*5113495bSYour Name 		buffer_paddr = qdf_mem_get_dma_addr(pdev->osdev,
1263*5113495bSYour Name 						&shared_tx_buffer->mem_info);
1264*5113495bSYour Name 
1265*5113495bSYour Name 		/* HTT control header */
1266*5113495bSYour Name 		*header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
1267*5113495bSYour Name 		header_ptr++;
1268*5113495bSYour Name 
1269*5113495bSYour Name 		/* PKT ID */
1270*5113495bSYour Name 		*header_ptr |= ((uint16_t) uc_tx_partition_base +
1271*5113495bSYour Name 				tx_buffer_count) << 16;
1272*5113495bSYour Name 		header_ptr++;
1273*5113495bSYour Name 
1274*5113495bSYour Name 		/*FRAG Desc Pointer */
1275*5113495bSYour Name 		*header_ptr = (uint32_t) (buffer_paddr +
1276*5113495bSYour Name 						IPA_UC_TX_BUF_FRAG_DESC_OFFSET);
1277*5113495bSYour Name 		header_ptr++;
1278*5113495bSYour Name 		*header_ptr = 0xFFFFFFFF;
1279*5113495bSYour Name 
1280*5113495bSYour Name 		/* FRAG Header */
1281*5113495bSYour Name 		header_ptr++;
1282*5113495bSYour Name 		*header_ptr = buffer_paddr + IPA_UC_TX_BUF_FRAG_HDR_OFFSET;
1283*5113495bSYour Name 
1284*5113495bSYour Name 		*ring_vaddr = buffer_paddr;
1285*5113495bSYour Name 		pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[tx_buffer_count] =
1286*5113495bSYour Name 			shared_tx_buffer;
1287*5113495bSYour Name 		/* Memory barrier to ensure actual value updated */
1288*5113495bSYour Name 
1289*5113495bSYour Name 		ring_vaddr++;
1290*5113495bSYour Name 	}
1291*5113495bSYour Name 
1292*5113495bSYour Name pwr2:
1293*5113495bSYour Name 	/*
1294*5113495bSYour Name 	 * Tx complete ring buffer count should be power of 2.
1295*5113495bSYour Name 	 * So, allocated Tx buffer count should be one less than ring buffer
1296*5113495bSYour Name 	 * size.
1297*5113495bSYour Name 	 */
1298*5113495bSYour Name 	tx_buffer_count_pwr2 = qdf_rounddown_pow_of_two(tx_buffer_count + 1)
1299*5113495bSYour Name 			       - 1;
1300*5113495bSYour Name 	if (tx_buffer_count > tx_buffer_count_pwr2) {
1301*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
1302*5113495bSYour Name 			  "%s: Allocated Tx buffer count %d is rounded down to %d",
1303*5113495bSYour Name 			  __func__, tx_buffer_count, tx_buffer_count_pwr2);
1304*5113495bSYour Name 
1305*5113495bSYour Name 		/* Free over allocated buffers below power of 2 */
1306*5113495bSYour Name 		for (idx = tx_buffer_count_pwr2; idx < tx_buffer_count; idx++) {
1307*5113495bSYour Name 			if (pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx]) {
1308*5113495bSYour Name 				qdf_mem_shared_mem_free(pdev->osdev,
1309*5113495bSYour Name 							pdev->ipa_uc_tx_rsc.
1310*5113495bSYour Name 							tx_buf_pool_strg[idx]);
1311*5113495bSYour Name 				pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx] =
1312*5113495bSYour Name 									NULL;
1313*5113495bSYour Name 			}
1314*5113495bSYour Name 		}
1315*5113495bSYour Name 	}
1316*5113495bSYour Name 
1317*5113495bSYour Name 	return tx_buffer_count_pwr2;
1318*5113495bSYour Name }
1319*5113495bSYour Name 
htt_tx_buf_pool_free(struct htt_pdev_t * pdev)1320*5113495bSYour Name static void htt_tx_buf_pool_free(struct htt_pdev_t *pdev)
1321*5113495bSYour Name {
1322*5113495bSYour Name 	uint16_t idx;
1323*5113495bSYour Name 
1324*5113495bSYour Name 	for (idx = 0; idx < pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
1325*5113495bSYour Name 		if (pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx]) {
1326*5113495bSYour Name 			qdf_mem_shared_mem_free(pdev->osdev,
1327*5113495bSYour Name 						pdev->ipa_uc_tx_rsc.
1328*5113495bSYour Name 							tx_buf_pool_strg[idx]);
1329*5113495bSYour Name 			pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx] = NULL;
1330*5113495bSYour Name 		}
1331*5113495bSYour Name 	}
1332*5113495bSYour Name }
1333*5113495bSYour Name #endif
1334*5113495bSYour Name 
1335*5113495bSYour Name /**
1336*5113495bSYour Name  * htt_tx_ipa_uc_attach() - attach htt ipa uc tx resource
1337*5113495bSYour Name  * @pdev: htt context
1338*5113495bSYour Name  * @uc_tx_buf_sz: single tx buffer size
1339*5113495bSYour Name  * @uc_tx_buf_cnt: total tx buffer count
1340*5113495bSYour Name  * @uc_tx_partition_base: tx buffer partition start
1341*5113495bSYour Name  *
1342*5113495bSYour Name  * Return: 0 success
1343*5113495bSYour Name  *         ENOBUFS No memory fail
1344*5113495bSYour Name  */
htt_tx_ipa_uc_attach(struct htt_pdev_t * pdev,unsigned int uc_tx_buf_sz,unsigned int uc_tx_buf_cnt,unsigned int uc_tx_partition_base)1345*5113495bSYour Name int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
1346*5113495bSYour Name 			 unsigned int uc_tx_buf_sz,
1347*5113495bSYour Name 			 unsigned int uc_tx_buf_cnt,
1348*5113495bSYour Name 			 unsigned int uc_tx_partition_base)
1349*5113495bSYour Name {
1350*5113495bSYour Name 	int return_code = 0;
1351*5113495bSYour Name 	unsigned int tx_comp_ring_size;
1352*5113495bSYour Name 
1353*5113495bSYour Name 	/* Allocate CE Write Index WORD */
1354*5113495bSYour Name 	pdev->ipa_uc_tx_rsc.tx_ce_idx =
1355*5113495bSYour Name 		qdf_mem_shared_mem_alloc(pdev->osdev, 4);
1356*5113495bSYour Name 	if (!pdev->ipa_uc_tx_rsc.tx_ce_idx) {
1357*5113495bSYour Name 		qdf_print("Unable to allocate memory for IPA tx ce idx");
1358*5113495bSYour Name 		return -ENOBUFS;
1359*5113495bSYour Name 	}
1360*5113495bSYour Name 
1361*5113495bSYour Name 	/* Allocate TX COMP Ring */
1362*5113495bSYour Name 	tx_comp_ring_size = qdf_get_pwr2(uc_tx_buf_cnt)
1363*5113495bSYour Name 			    * sizeof(target_paddr_t);
1364*5113495bSYour Name 	pdev->ipa_uc_tx_rsc.tx_comp_ring =
1365*5113495bSYour Name 		qdf_mem_shared_mem_alloc(pdev->osdev,
1366*5113495bSYour Name 					 tx_comp_ring_size);
1367*5113495bSYour Name 	if (!pdev->ipa_uc_tx_rsc.tx_comp_ring ||
1368*5113495bSYour Name 	    !pdev->ipa_uc_tx_rsc.tx_comp_ring->vaddr) {
1369*5113495bSYour Name 		qdf_print("TX COMP ring alloc fail");
1370*5113495bSYour Name 		return_code = -ENOBUFS;
1371*5113495bSYour Name 		goto free_tx_ce_idx;
1372*5113495bSYour Name 	}
1373*5113495bSYour Name 
1374*5113495bSYour Name 	uc_tx_buf_cnt = htt_tx_ipa_get_limit_tx_buf_count(uc_tx_buf_cnt);
1375*5113495bSYour Name 	/* Allocate TX BUF vAddress Storage */
1376*5113495bSYour Name 	pdev->ipa_uc_tx_rsc.tx_buf_pool_strg =
1377*5113495bSYour Name 		qdf_mem_malloc(uc_tx_buf_cnt *
1378*5113495bSYour Name 			sizeof(*pdev->ipa_uc_tx_rsc.tx_buf_pool_strg));
1379*5113495bSYour Name 	if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_strg) {
1380*5113495bSYour Name 		return_code = -ENOBUFS;
1381*5113495bSYour Name 		goto free_tx_comp_base;
1382*5113495bSYour Name 	}
1383*5113495bSYour Name 
1384*5113495bSYour Name 	qdf_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_strg,
1385*5113495bSYour Name 		     uc_tx_buf_cnt *
1386*5113495bSYour Name 		     sizeof(*pdev->ipa_uc_tx_rsc.tx_buf_pool_strg));
1387*5113495bSYour Name 
1388*5113495bSYour Name 	pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = htt_tx_ipa_uc_wdi_tx_buf_alloc(
1389*5113495bSYour Name 		pdev, uc_tx_buf_sz, uc_tx_buf_cnt, uc_tx_partition_base);
1390*5113495bSYour Name 
1391*5113495bSYour Name 	pdev->ipa_uc_tx_rsc.ipa_smmu_mapped = false;
1392*5113495bSYour Name 
1393*5113495bSYour Name 
1394*5113495bSYour Name 	return 0;
1395*5113495bSYour Name 
1396*5113495bSYour Name free_tx_comp_base:
1397*5113495bSYour Name 	qdf_mem_shared_mem_free(pdev->osdev,
1398*5113495bSYour Name 				pdev->ipa_uc_tx_rsc.tx_comp_ring);
1399*5113495bSYour Name free_tx_ce_idx:
1400*5113495bSYour Name 	qdf_mem_shared_mem_free(pdev->osdev,
1401*5113495bSYour Name 				pdev->ipa_uc_tx_rsc.tx_ce_idx);
1402*5113495bSYour Name 
1403*5113495bSYour Name 	return return_code;
1404*5113495bSYour Name }
1405*5113495bSYour Name 
1406*5113495bSYour Name /**
1407*5113495bSYour Name  * htt_tx_ipa_uc_detach() - Free WDI TX resources
1408*5113495bSYour Name  * @pdev: htt context
1409*5113495bSYour Name  *
1410*5113495bSYour Name  * Remove IPA WDI TX resources during device detach
1411*5113495bSYour Name  * Free all of allocated resources
1412*5113495bSYour Name  *
1413*5113495bSYour Name  * Return: 0 success
1414*5113495bSYour Name  */
htt_tx_ipa_uc_detach(struct htt_pdev_t * pdev)1415*5113495bSYour Name int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev)
1416*5113495bSYour Name {
1417*5113495bSYour Name 	qdf_mem_shared_mem_free(pdev->osdev,
1418*5113495bSYour Name 				pdev->ipa_uc_tx_rsc.tx_ce_idx);
1419*5113495bSYour Name 	qdf_mem_shared_mem_free(pdev->osdev,
1420*5113495bSYour Name 				pdev->ipa_uc_tx_rsc.tx_comp_ring);
1421*5113495bSYour Name 
1422*5113495bSYour Name 	/* Free each single buffer */
1423*5113495bSYour Name 	htt_tx_buf_pool_free(pdev);
1424*5113495bSYour Name 
1425*5113495bSYour Name 	/* Free storage */
1426*5113495bSYour Name 	qdf_mem_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_strg);
1427*5113495bSYour Name 
1428*5113495bSYour Name 	return 0;
1429*5113495bSYour Name }
1430*5113495bSYour Name #endif /* IPA_OFFLOAD */
1431*5113495bSYour Name 
1432*5113495bSYour Name #if defined(FEATURE_TSO) && defined(HELIUMPLUS)
/**
 * htt_tx_desc_fill_tso_info() - populate the MSDU extension descriptor
 *	with TSO flags and fragment addresses for the current TSO segment
 * @pdev: HTT pdev handle (not referenced in this function)
 * @desc: MSDU extension descriptor to fill (written in place)
 * @tso_info: TSO state; only @tso_info->curr_seg is consumed here
 *
 * Descriptor layout (as written below): the first 6 words hold the TSO
 * flags, followed by two words per fragment (low address word, then
 * length<<16 | high address bits).
 */
void
htt_tx_desc_fill_tso_info(htt_pdev_handle pdev, void *desc,
	 struct qdf_tso_info_t *tso_info)
{
	u_int32_t *word;
	int i;
	struct qdf_tso_seg_elem_t *tso_seg = tso_info->curr_seg;
	struct msdu_ext_desc_t *msdu_ext_desc = (struct msdu_ext_desc_t *)desc;

	word = (u_int32_t *)(desc);

	/* Initialize the TSO flags per MSDU */
	msdu_ext_desc->tso_flags =
		 tso_seg->seg.tso_flags;

	/* First 24 bytes (6*4) contain the TSO flags */
	TSO_DEBUG("%s seq# %u l2 len %d, ip len %d",
		  __func__,
		  tso_seg->seg.tso_flags.tcp_seq_num,
		  tso_seg->seg.tso_flags.l2_len,
		  tso_seg->seg.tso_flags.ip_len);
	TSO_DEBUG("%s flags 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
		  __func__,
		  *word,
		  *(word + 1),
		  *(word + 2),
		  *(word + 3),
		  *(word + 4),
		  *(word + 5));

	/* Advance past the 6-word TSO flags area written above */
	word += 6;

	/* Two descriptor words per fragment: address low, then len|hi */
	for (i = 0; i < tso_seg->seg.num_frags; i++) {
		uint32_t lo = 0;
		uint32_t hi = 0;

		qdf_dmaaddr_to_32s(tso_seg->seg.tso_frags[i].paddr,
						&lo, &hi);
		/* [31:0] first 32 bits of the buffer pointer  */
		*word = lo;
		word++;
		/* [15:0] the upper 16 bits of the first buffer pointer */
		/* [31:16] length of the first buffer */
		*word = (tso_seg->seg.tso_frags[i].length << 16) | hi;
		word++;
		TSO_DEBUG("%s frag[%d] ptr_low 0x%x ptr_hi 0x%x len %u",
			__func__, i,
			msdu_ext_desc->frags[i].u.frag32.ptr_low,
			msdu_ext_desc->frags[i].u.frag32.ptr_hi,
			msdu_ext_desc->frags[i].u.frag32.len);
	}

	/*
	 * Zero the word following the last fragment pair — presumably this
	 * terminates the fragment list when fewer than FRAG_NUM_MAX
	 * fragments are present (confirm against the HW descriptor spec).
	 */
	if (tso_seg->seg.num_frags < FRAG_NUM_MAX)
		*word = 0;
	qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_FILLHTTSEG);
}
1489*5113495bSYour Name #endif /* FEATURE_TSO */
1490*5113495bSYour Name 
1491*5113495bSYour Name /**
1492*5113495bSYour Name  * htt_get_ext_tid() - get ext_tid value
1493*5113495bSYour Name  * @type: extension header type
1494*5113495bSYour Name  * @ext_header_data: header data
1495*5113495bSYour Name  * @msdu_info: msdu info
1496*5113495bSYour Name  *
1497*5113495bSYour Name  * Return: ext_tid value
1498*5113495bSYour Name  */
1499*5113495bSYour Name static inline
htt_get_ext_tid(enum extension_header_type type,void * ext_header_data,struct htt_msdu_info_t * msdu_info)1500*5113495bSYour Name int htt_get_ext_tid(enum extension_header_type type,
1501*5113495bSYour Name 	void *ext_header_data, struct htt_msdu_info_t *msdu_info)
1502*5113495bSYour Name {
1503*5113495bSYour Name 	if (type == OCB_MODE_EXT_HEADER && ext_header_data)
1504*5113495bSYour Name 		return ((struct ocb_tx_ctrl_hdr_t *)ext_header_data)->ext_tid;
1505*5113495bSYour Name 	else
1506*5113495bSYour Name 		return msdu_info->info.ext_tid;
1507*5113495bSYour Name }
1508*5113495bSYour Name 
1509*5113495bSYour Name /**
1510*5113495bSYour Name  * htt_get_channel_freq() - get channel frequency
1511*5113495bSYour Name  * @type: extension header type
1512*5113495bSYour Name  * @ext_header_data: header data
1513*5113495bSYour Name  *
1514*5113495bSYour Name  * Return: channel frequency number
1515*5113495bSYour Name  */
1516*5113495bSYour Name static inline
htt_get_channel_freq(enum extension_header_type type,void * ext_header_data)1517*5113495bSYour Name int htt_get_channel_freq(enum extension_header_type type,
1518*5113495bSYour Name 	void *ext_header_data)
1519*5113495bSYour Name {
1520*5113495bSYour Name 	if (type == OCB_MODE_EXT_HEADER && ext_header_data)
1521*5113495bSYour Name 		return ((struct ocb_tx_ctrl_hdr_t *)ext_header_data)
1522*5113495bSYour Name 							->channel_freq;
1523*5113495bSYour Name 	else
1524*5113495bSYour Name 		return HTT_INVALID_CHANNEL;
1525*5113495bSYour Name }
1526*5113495bSYour Name 
1527*5113495bSYour Name /**
1528*5113495bSYour Name  * htt_fill_ocb_ext_header() - fill OCB extension header
1529*5113495bSYour Name  * @msdu: network buffer
1530*5113495bSYour Name  * @local_desc_ext: extension descriptor
1531*5113495bSYour Name  * @type: extension header type
1532*5113495bSYour Name  * @ext_header_data: header data
 * @is_dsrc: whether DSRC is enabled
1534*5113495bSYour Name  *
1535*5113495bSYour Name  * Return: none
1536*5113495bSYour Name  */
1537*5113495bSYour Name #ifdef WLAN_FEATURE_DSRC
1538*5113495bSYour Name static
htt_fill_ocb_ext_header(qdf_nbuf_t msdu,struct htt_tx_msdu_desc_ext_t * local_desc_ext,enum extension_header_type type,void * ext_header_data)1539*5113495bSYour Name void htt_fill_ocb_ext_header(qdf_nbuf_t msdu,
1540*5113495bSYour Name 			     struct htt_tx_msdu_desc_ext_t *local_desc_ext,
1541*5113495bSYour Name 			     enum extension_header_type type,
1542*5113495bSYour Name 			     void *ext_header_data)
1543*5113495bSYour Name {
1544*5113495bSYour Name 	struct ocb_tx_ctrl_hdr_t *tx_ctrl =
1545*5113495bSYour Name 		(struct ocb_tx_ctrl_hdr_t *)ext_header_data;
1546*5113495bSYour Name 
1547*5113495bSYour Name 	if (tx_ctrl->all_flags == 0)
1548*5113495bSYour Name 		return;
1549*5113495bSYour Name 	/*
1550*5113495bSYour Name 	 * Copy the info that was read from TX control header from the
1551*5113495bSYour Name 	 * user application to the extended HTT header.
1552*5113495bSYour Name 	 * First copy everything
1553*5113495bSYour Name 	 * to a local temp structure, and then copy everything to the
1554*5113495bSYour Name 	 * actual uncached structure in one go to save memory writes.
1555*5113495bSYour Name 	 */
1556*5113495bSYour Name 	local_desc_ext->valid_pwr = tx_ctrl->valid_pwr;
1557*5113495bSYour Name 	local_desc_ext->valid_mcs_mask = tx_ctrl->valid_datarate;
1558*5113495bSYour Name 	local_desc_ext->valid_retries = tx_ctrl->valid_retries;
1559*5113495bSYour Name 	local_desc_ext->valid_expire_tsf = tx_ctrl->valid_expire_tsf;
1560*5113495bSYour Name 	local_desc_ext->valid_chainmask = tx_ctrl->valid_chain_mask;
1561*5113495bSYour Name 
1562*5113495bSYour Name 	local_desc_ext->pwr = tx_ctrl->pwr;
1563*5113495bSYour Name 	if (tx_ctrl->valid_datarate &&
1564*5113495bSYour Name 			tx_ctrl->datarate <= htt_ofdm_datarate_max)
1565*5113495bSYour Name 		local_desc_ext->mcs_mask =
1566*5113495bSYour Name 			(1 << (tx_ctrl->datarate + 4));
1567*5113495bSYour Name 	local_desc_ext->retry_limit = tx_ctrl->retry_limit;
1568*5113495bSYour Name 	local_desc_ext->expire_tsf_lo = tx_ctrl->expire_tsf_lo;
1569*5113495bSYour Name 	local_desc_ext->expire_tsf_hi = tx_ctrl->expire_tsf_hi;
1570*5113495bSYour Name 	local_desc_ext->chain_mask = tx_ctrl->chain_mask;
1571*5113495bSYour Name 	local_desc_ext->is_dsrc = 1;
1572*5113495bSYour Name 	qdf_nbuf_push_head(msdu, sizeof(struct htt_tx_msdu_desc_ext_t));
1573*5113495bSYour Name 	qdf_mem_copy(qdf_nbuf_data(msdu), local_desc_ext,
1574*5113495bSYour Name 			sizeof(struct htt_tx_msdu_desc_ext_t));
1575*5113495bSYour Name 	QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(msdu) = 1;
1576*5113495bSYour Name }
1577*5113495bSYour Name #else
static
void htt_fill_ocb_ext_header(qdf_nbuf_t msdu,
			     struct htt_tx_msdu_desc_ext_t *local_desc_ext,
			     enum extension_header_type type,
			     void *ext_header_data)
{
	/*
	 * OCB/DSRC support compiled out (WLAN_FEATURE_DSRC undefined):
	 * this is an intentional no-op stub kept so callers compile
	 * unconditionally.
	 */
}
1585*5113495bSYour Name #endif
1586*5113495bSYour Name 
1587*5113495bSYour Name /**
1588*5113495bSYour Name  * htt_fill_wisa_ext_header() - fill WiSA extension header
1589*5113495bSYour Name  * @msdu: network buffer
1590*5113495bSYour Name  * @local_desc_ext: extension descriptor
1591*5113495bSYour Name  * @type: extension header type
1592*5113495bSYour Name  * @ext_header_data: header data
1593*5113495bSYour Name  *
1594*5113495bSYour Name  * Return: none
1595*5113495bSYour Name  */
1596*5113495bSYour Name static
htt_fill_wisa_ext_header(qdf_nbuf_t msdu,struct htt_tx_msdu_desc_ext_t * local_desc_ext,enum extension_header_type type,void * ext_header_data)1597*5113495bSYour Name void htt_fill_wisa_ext_header(qdf_nbuf_t msdu,
1598*5113495bSYour Name 	struct htt_tx_msdu_desc_ext_t *local_desc_ext,
1599*5113495bSYour Name 	enum extension_header_type type, void *ext_header_data)
1600*5113495bSYour Name {
1601*5113495bSYour Name 	void *qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
1602*5113495bSYour Name 	QDF_STATUS status;
1603*5113495bSYour Name 
1604*5113495bSYour Name 	if (!qdf_ctx)
1605*5113495bSYour Name 		return;
1606*5113495bSYour Name 
1607*5113495bSYour Name 	local_desc_ext->valid_mcs_mask = 1;
1608*5113495bSYour Name 	if (WISA_MODE_EXT_HEADER_6MBPS == type)
1609*5113495bSYour Name 		local_desc_ext->mcs_mask = htt_ofdm_datarate_6_mbps;
1610*5113495bSYour Name 	else
1611*5113495bSYour Name 		local_desc_ext->mcs_mask = htt_ofdm_datarate_24_mbps;
1612*5113495bSYour Name 	local_desc_ext->valid_nss_mask = 1;
1613*5113495bSYour Name 	local_desc_ext->nss_mask = 1;
1614*5113495bSYour Name 	local_desc_ext->valid_bandwidth = 1;
1615*5113495bSYour Name 	local_desc_ext->bandwidth_mask = htt_tx_bandwidth_20MHz;
1616*5113495bSYour Name 	local_desc_ext->valid_guard_interval = 1;
1617*5113495bSYour Name 	local_desc_ext->guard_interval = htt_tx_guard_interval_regular;
1618*5113495bSYour Name 
1619*5113495bSYour Name 	/*
1620*5113495bSYour Name 	 * Do dma_unmap and dma_map again if already mapped
1621*5113495bSYour Name 	 * as adding extra bytes in skb
1622*5113495bSYour Name 	 */
1623*5113495bSYour Name 	if (QDF_NBUF_CB_PADDR(msdu) != 0)
1624*5113495bSYour Name 		qdf_nbuf_unmap_single(qdf_ctx, msdu, QDF_DMA_TO_DEVICE);
1625*5113495bSYour Name 
1626*5113495bSYour Name 	qdf_nbuf_push_head(msdu, sizeof(struct htt_tx_msdu_desc_ext_t));
1627*5113495bSYour Name 	qdf_mem_copy(qdf_nbuf_data(msdu), local_desc_ext,
1628*5113495bSYour Name 			sizeof(struct htt_tx_msdu_desc_ext_t));
1629*5113495bSYour Name 
1630*5113495bSYour Name 	if (QDF_NBUF_CB_PADDR(msdu) != 0) {
1631*5113495bSYour Name 		status = qdf_nbuf_map_single(qdf_ctx, msdu, QDF_DMA_TO_DEVICE);
1632*5113495bSYour Name 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
1633*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
1634*5113495bSYour Name 				"%s: nbuf map failed", __func__);
1635*5113495bSYour Name 			return;
1636*5113495bSYour Name 		}
1637*5113495bSYour Name 	}
1638*5113495bSYour Name 	QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(msdu) = 1;
1639*5113495bSYour Name }
1640*5113495bSYour Name 
1641*5113495bSYour Name /**
1642*5113495bSYour Name  * htt_push_ext_header() - fill extension header
1643*5113495bSYour Name  * @msdu: network buffer
1644*5113495bSYour Name  * @local_desc_ext: extension descriptor
1645*5113495bSYour Name  * @type: extension header type
1646*5113495bSYour Name  * @ext_header_data: header data
 * @is_dsrc: whether DSRC is enabled
1648*5113495bSYour Name  *
1649*5113495bSYour Name  * Return: none
1650*5113495bSYour Name  */
1651*5113495bSYour Name static
htt_push_ext_header(qdf_nbuf_t msdu,struct htt_tx_msdu_desc_ext_t * local_desc_ext,enum extension_header_type type,void * ext_header_data)1652*5113495bSYour Name void htt_push_ext_header(qdf_nbuf_t msdu,
1653*5113495bSYour Name 	struct htt_tx_msdu_desc_ext_t *local_desc_ext,
1654*5113495bSYour Name 	enum extension_header_type type, void *ext_header_data)
1655*5113495bSYour Name {
1656*5113495bSYour Name 	switch (type) {
1657*5113495bSYour Name 	case OCB_MODE_EXT_HEADER:
1658*5113495bSYour Name 		htt_fill_ocb_ext_header(msdu, local_desc_ext,
1659*5113495bSYour Name 					type, ext_header_data);
1660*5113495bSYour Name 		break;
1661*5113495bSYour Name 	case WISA_MODE_EXT_HEADER_6MBPS:
1662*5113495bSYour Name 	case WISA_MODE_EXT_HEADER_24MBPS:
1663*5113495bSYour Name 		htt_fill_wisa_ext_header(msdu, local_desc_ext,
1664*5113495bSYour Name 					type, ext_header_data);
1665*5113495bSYour Name 		break;
1666*5113495bSYour Name 	default:
1667*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
1668*5113495bSYour Name 			"Invalid EXT header type %d\n", type);
1669*5113495bSYour Name 		break;
1670*5113495bSYour Name 	}
1671*5113495bSYour Name }
1672*5113495bSYour Name 
1673*5113495bSYour Name QDF_STATUS
htt_tx_desc_init(htt_pdev_handle pdev,void * htt_tx_desc,qdf_dma_addr_t htt_tx_desc_paddr,uint16_t msdu_id,qdf_nbuf_t msdu,struct htt_msdu_info_t * msdu_info,struct qdf_tso_info_t * tso_info,void * ext_header_data,enum extension_header_type type)1674*5113495bSYour Name htt_tx_desc_init(htt_pdev_handle pdev,
1675*5113495bSYour Name 		 void *htt_tx_desc,
1676*5113495bSYour Name 		 qdf_dma_addr_t htt_tx_desc_paddr,
1677*5113495bSYour Name 		 uint16_t msdu_id,
1678*5113495bSYour Name 		 qdf_nbuf_t msdu, struct htt_msdu_info_t *msdu_info,
1679*5113495bSYour Name 		 struct qdf_tso_info_t *tso_info,
1680*5113495bSYour Name 		 void *ext_header_data,
1681*5113495bSYour Name 		 enum extension_header_type type)
1682*5113495bSYour Name {
1683*5113495bSYour Name 	uint8_t  pkt_type, pkt_subtype = 0, ce_pkt_type = 0;
1684*5113495bSYour Name 	uint32_t hw_classify = 0, data_attr = 0;
1685*5113495bSYour Name 	uint32_t *word0, *word1, local_word3;
1686*5113495bSYour Name #if HTT_PADDR64
1687*5113495bSYour Name 	uint32_t *word4;
1688*5113495bSYour Name #else /* ! HTT_PADDR64 */
1689*5113495bSYour Name 	uint32_t *word3;
1690*5113495bSYour Name #endif /* HTT_PADDR64 */
1691*5113495bSYour Name 	uint32_t local_word0, local_word1;
1692*5113495bSYour Name 	struct htt_host_tx_desc_t *htt_host_tx_desc =
1693*5113495bSYour Name 		(struct htt_host_tx_desc_t *)
1694*5113495bSYour Name 		(((char *)htt_tx_desc) - HTT_TX_DESC_VADDR_OFFSET);
1695*5113495bSYour Name 	bool desc_ext_required = (type != EXT_HEADER_NOT_PRESENT);
1696*5113495bSYour Name 	int channel_freq;
1697*5113495bSYour Name 	void *qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
1698*5113495bSYour Name 	qdf_dma_dir_t dir;
1699*5113495bSYour Name 	QDF_STATUS status;
1700*5113495bSYour Name 
1701*5113495bSYour Name 	if (qdf_unlikely(!qdf_ctx))
1702*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1703*5113495bSYour Name 
1704*5113495bSYour Name 	if (qdf_unlikely(!msdu_info)) {
1705*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1706*5113495bSYour Name 			"%s: bad arg: msdu_info is NULL", __func__);
1707*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1708*5113495bSYour Name 	}
1709*5113495bSYour Name 	if (qdf_unlikely(!tso_info)) {
1710*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1711*5113495bSYour Name 			"%s: bad arg: tso_info is NULL", __func__);
1712*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1713*5113495bSYour Name 	}
1714*5113495bSYour Name 
1715*5113495bSYour Name 	word0 = (uint32_t *) htt_tx_desc;
1716*5113495bSYour Name 	word1 = word0 + 1;
1717*5113495bSYour Name 	/*
1718*5113495bSYour Name 	 * word2 is frag desc pointer
1719*5113495bSYour Name 	 * word3 or 4 is peer_id
1720*5113495bSYour Name 	 */
1721*5113495bSYour Name #if HTT_PADDR64
1722*5113495bSYour Name 	word4 = word0 + 4;      /* Dword 3 */
1723*5113495bSYour Name #else /* ! HTT_PADDR64  */
1724*5113495bSYour Name 	word3 = word0 + 3;      /* Dword 3 */
1725*5113495bSYour Name #endif /* HTT_PADDR64 */
1726*5113495bSYour Name 
1727*5113495bSYour Name 	pkt_type = msdu_info->info.l2_hdr_type;
1728*5113495bSYour Name 
1729*5113495bSYour Name 	if (qdf_likely(pdev->cfg.ce_classify_enabled)) {
1730*5113495bSYour Name 		if (qdf_likely(pkt_type == htt_pkt_type_eth2 ||
1731*5113495bSYour Name 			pkt_type == htt_pkt_type_ethernet))
1732*5113495bSYour Name 			qdf_nbuf_tx_info_get(msdu, pkt_type, pkt_subtype,
1733*5113495bSYour Name 				     hw_classify);
1734*5113495bSYour Name 
1735*5113495bSYour Name 		ce_pkt_type = htt_to_ce_pkt_type[pkt_type];
1736*5113495bSYour Name 		if (0xffffffff == ce_pkt_type) {
1737*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
1738*5113495bSYour Name 			"Invalid HTT pkt type %d\n", pkt_type);
1739*5113495bSYour Name 			return QDF_STATUS_E_INVAL;
1740*5113495bSYour Name 		}
1741*5113495bSYour Name 	}
1742*5113495bSYour Name 
1743*5113495bSYour Name 	/*
1744*5113495bSYour Name 	 * HTT Tx Desc is in uncached memory. Used cached writes per word, to
1745*5113495bSYour Name 	 * reduce unnecessary memory access.
1746*5113495bSYour Name 	 */
1747*5113495bSYour Name 
1748*5113495bSYour Name 	local_word0 = 0;
1749*5113495bSYour Name 
1750*5113495bSYour Name 	HTT_H2T_MSG_TYPE_SET(local_word0, HTT_H2T_MSG_TYPE_TX_FRM);
1751*5113495bSYour Name 	HTT_TX_DESC_PKT_TYPE_SET(local_word0, pkt_type);
1752*5113495bSYour Name 	HTT_TX_DESC_PKT_SUBTYPE_SET(local_word0, pkt_subtype);
1753*5113495bSYour Name 	HTT_TX_DESC_VDEV_ID_SET(local_word0, msdu_info->info.vdev_id);
1754*5113495bSYour Name 	HTT_TX_DESC_EXT_TID_SET(local_word0, htt_get_ext_tid(type,
1755*5113495bSYour Name 					ext_header_data, msdu_info));
1756*5113495bSYour Name 	HTT_TX_DESC_EXTENSION_SET(local_word0, desc_ext_required);
1757*5113495bSYour Name 	HTT_TX_DESC_EXT_TID_SET(local_word0, msdu_info->info.ext_tid);
1758*5113495bSYour Name 	HTT_TX_DESC_CKSUM_OFFLOAD_SET(local_word0,
1759*5113495bSYour Name 				      msdu_info->action.cksum_offload);
1760*5113495bSYour Name 	if (pdev->cfg.is_high_latency)
1761*5113495bSYour Name 		HTT_TX_DESC_TX_COMP_SET(local_word0, msdu_info->action.
1762*5113495bSYour Name 							tx_comp_req);
1763*5113495bSYour Name 	HTT_TX_DESC_NO_ENCRYPT_SET(local_word0,
1764*5113495bSYour Name 				   msdu_info->action.do_encrypt ?
1765*5113495bSYour Name 				   0 : 1);
1766*5113495bSYour Name 
1767*5113495bSYour Name 	*word0 = local_word0;
1768*5113495bSYour Name 
1769*5113495bSYour Name 	local_word1 = 0;
1770*5113495bSYour Name 
1771*5113495bSYour Name 	if (tso_info->is_tso) {
1772*5113495bSYour Name 		uint32_t total_len = tso_info->curr_seg->seg.total_len;
1773*5113495bSYour Name 
1774*5113495bSYour Name 		HTT_TX_DESC_FRM_LEN_SET(local_word1, total_len);
1775*5113495bSYour Name 		TSO_DEBUG("%s setting HTT TX DESC Len = %d",
1776*5113495bSYour Name 			  __func__, total_len);
1777*5113495bSYour Name 	} else {
1778*5113495bSYour Name 		HTT_TX_DESC_FRM_LEN_SET(local_word1, qdf_nbuf_len(msdu));
1779*5113495bSYour Name 	}
1780*5113495bSYour Name 
1781*5113495bSYour Name 	QDF_BUG(HTT_TX_DESC_FRM_LEN_GET(local_word1) != 0);
1782*5113495bSYour Name 
1783*5113495bSYour Name 	HTT_TX_DESC_FRM_ID_SET(local_word1, msdu_id);
1784*5113495bSYour Name 	*word1 = local_word1;
1785*5113495bSYour Name 
1786*5113495bSYour Name 	/*
1787*5113495bSYour Name 	 * Initialize peer_id to INVALID_PEER because
1788*5113495bSYour Name 	 * this is NOT Reinjection path
1789*5113495bSYour Name 	 */
1790*5113495bSYour Name 	local_word3 = HTT_INVALID_PEER;
1791*5113495bSYour Name 	channel_freq = htt_get_channel_freq(type, ext_header_data);
1792*5113495bSYour Name 	if (channel_freq != HTT_INVALID_CHANNEL && channel_freq > 0)
1793*5113495bSYour Name 		HTT_TX_DESC_CHAN_FREQ_SET(local_word3, channel_freq);
1794*5113495bSYour Name #if HTT_PADDR64
1795*5113495bSYour Name 	*word4 = local_word3;
1796*5113495bSYour Name #else /* ! HTT_PADDR64 */
1797*5113495bSYour Name 	*word3 = local_word3;
1798*5113495bSYour Name #endif /* HTT_PADDR64 */
1799*5113495bSYour Name 
1800*5113495bSYour Name 	/*
1801*5113495bSYour Name 	 *  If any of the tx control flags are set, then we need the extended
1802*5113495bSYour Name 	 *  HTT header.
1803*5113495bSYour Name 	 */
1804*5113495bSYour Name 	if (desc_ext_required) {
1805*5113495bSYour Name 		struct htt_tx_msdu_desc_ext_t local_desc_ext = {0};
1806*5113495bSYour Name 
1807*5113495bSYour Name 		htt_push_ext_header(msdu, &local_desc_ext,
1808*5113495bSYour Name 			type, ext_header_data);
1809*5113495bSYour Name 	}
1810*5113495bSYour Name 
1811*5113495bSYour Name 	/*
1812*5113495bSYour Name 	 * Specify that the data provided by the OS is a bytestream,
1813*5113495bSYour Name 	 * and thus should not be byte-swapped during the HIF download
1814*5113495bSYour Name 	 * even if the host is big-endian.
1815*5113495bSYour Name 	 * There could be extra fragments added before the OS's fragments,
1816*5113495bSYour Name 	 * e.g. for TSO, so it's incorrect to clear the frag 0 wordstream flag.
1817*5113495bSYour Name 	 * Instead, clear the wordstream flag for the final fragment, which
1818*5113495bSYour Name 	 * is certain to be (one of the) fragment(s) provided by the OS.
1819*5113495bSYour Name 	 * Setting the flag for this final fragment suffices for specifying
1820*5113495bSYour Name 	 * all fragments provided by the OS rather than added by the driver.
1821*5113495bSYour Name 	 */
1822*5113495bSYour Name 	qdf_nbuf_set_frag_is_wordstream(msdu, qdf_nbuf_get_num_frags(msdu) - 1,
1823*5113495bSYour Name 					0);
1824*5113495bSYour Name 
1825*5113495bSYour Name 	if (QDF_NBUF_CB_PADDR(msdu) == 0) {
1826*5113495bSYour Name 		dir = QDF_NBUF_CB_TX_DMA_BI_MAP(msdu) ?
1827*5113495bSYour Name 			QDF_DMA_BIDIRECTIONAL : QDF_DMA_TO_DEVICE;
1828*5113495bSYour Name 		status = qdf_nbuf_map_single(qdf_ctx, msdu, dir);
1829*5113495bSYour Name 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
1830*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1831*5113495bSYour Name 				"%s: nbuf map failed", __func__);
1832*5113495bSYour Name 			return QDF_STATUS_E_NOMEM;
1833*5113495bSYour Name 		}
1834*5113495bSYour Name 	}
1835*5113495bSYour Name 
1836*5113495bSYour Name 	/* store a link to the HTT tx descriptor within the netbuf */
1837*5113495bSYour Name 	qdf_nbuf_frag_push_head(msdu, sizeof(struct htt_host_tx_desc_t),
1838*5113495bSYour Name 				(char *)htt_host_tx_desc, /* virtual addr */
1839*5113495bSYour Name 				htt_tx_desc_paddr);
1840*5113495bSYour Name 
1841*5113495bSYour Name 	/*
1842*5113495bSYour Name 	 * Indicate that the HTT header (and HTC header) is a meta-data
1843*5113495bSYour Name 	 * "wordstream", i.e. series of uint32_t, rather than a data
1844*5113495bSYour Name 	 * bytestream.
1845*5113495bSYour Name 	 * This allows the HIF download to byteswap the HTT + HTC headers if
1846*5113495bSYour Name 	 * the host is big-endian, to convert to the target's little-endian
1847*5113495bSYour Name 	 * format.
1848*5113495bSYour Name 	 */
1849*5113495bSYour Name 	qdf_nbuf_set_frag_is_wordstream(msdu, 0, 1);
1850*5113495bSYour Name 
1851*5113495bSYour Name 	if (qdf_likely(pdev->cfg.ce_classify_enabled &&
1852*5113495bSYour Name 		(msdu_info->info.l2_hdr_type != htt_pkt_type_mgmt))) {
1853*5113495bSYour Name 		uint32_t pkt_offset = qdf_nbuf_get_frag_len(msdu, 0);
1854*5113495bSYour Name 
1855*5113495bSYour Name 		data_attr = hw_classify << CE_DESC_TX_CLASSIFY_BIT_S;
1856*5113495bSYour Name 		data_attr |= ce_pkt_type << CE_DESC_PKT_TYPE_BIT_S;
1857*5113495bSYour Name 		data_attr |= pkt_offset  << CE_DESC_PKT_OFFSET_BIT_S;
1858*5113495bSYour Name 	}
1859*5113495bSYour Name 
1860*5113495bSYour Name 	qdf_nbuf_data_attr_set(msdu, data_attr);
1861*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1862*5113495bSYour Name }
1863*5113495bSYour Name 
1864*5113495bSYour Name #ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
1865*5113495bSYour Name 
1866*5113495bSYour Name /**
1867*5113495bSYour Name  * htt_tx_group_credit_process() - process group data for
1868*5113495bSYour Name  *				   credit update indication
1869*5113495bSYour Name  * @pdev: pointer to htt device.
1870*5113495bSYour Name  * @msg_word: htt msg
1871*5113495bSYour Name  *
1872*5113495bSYour Name  * Return: None
1873*5113495bSYour Name  */
void htt_tx_group_credit_process(struct htt_pdev_t *pdev, u_int32_t *msg_word)
{
	/*
	 * Walk the TXQ group records appended to the credit update
	 * indication and hand each group's credit delta and membership
	 * masks to the txrx layer.  The leading message word only says
	 * whether any group records are present; each record then carries
	 * its own "more records follow" flag.
	 */
	u_int8_t group_offset = 0;
	u_int8_t more_groups = HTT_TX_CREDIT_TXQ_GRP_GET(*msg_word);

	while (more_groups) {
		/*
		 * Per-record layout: word0 holds group id, credit count,
		 * sign, absolute flag and the extension (more) bit;
		 * word1 holds the vdev-id and AC membership masks.
		 */
		u_int32_t *grp_word0 = msg_word + 1 + group_offset;
		u_int32_t *grp_word1 = msg_word + 2 + group_offset;
		u_int8_t group_id = HTT_TXQ_GROUP_ID_GET(*grp_word0);
		u_int32_t credit_abs =
			HTT_TXQ_GROUP_CREDIT_COUNT_GET(*grp_word0);
		int credit_sign =
			HTT_TXQ_GROUP_SIGN_GET(*grp_word0) ? -1 : 1;
		int32_t group_credit = credit_sign * credit_abs;
		u_int8_t group_abs = HTT_TXQ_GROUP_ABS_GET(*grp_word0);
		u_int32_t vdev_id_mask =
			HTT_TXQ_GROUP_VDEV_ID_MASK_GET(*grp_word1);
		u_int32_t ac_mask = HTT_TXQ_GROUP_AC_MASK_GET(*grp_word1);

		ol_txrx_update_tx_queue_groups(pdev->txrx_pdev, group_id,
					       group_credit, group_abs,
					       vdev_id_mask, ac_mask);

		more_groups = HTT_TXQ_GROUP_EXT_GET(*grp_word0);
		group_offset += HTT_TX_GROUP_INDEX_OFFSET;
	}
	/* Refresh cumulative per-group credit stats once per indication */
	ol_tx_update_group_credit_stats(pdev->txrx_pdev);
}
1913*5113495bSYour Name #endif
1914*5113495bSYour Name 
1915