
Searched refs:tx_desc (Results 1 – 25 of 44) sorted by relevance

/wlan-driver/qcacld-3.0/core/dp/txrx/
ol_tx_desc.c
40 struct ol_tx_desc_t *tx_desc) in ol_tx_desc_sanity_checks() argument
42 if (tx_desc->pkt_type != ol_tx_frm_freed) { in ol_tx_desc_sanity_checks()
44 tx_desc->pkt_type, pdev); in ol_tx_desc_sanity_checks()
48 static inline void ol_tx_desc_reset_pkt_type(struct ol_tx_desc_t *tx_desc) in ol_tx_desc_reset_pkt_type() argument
50 tx_desc->pkt_type = ol_tx_frm_freed; in ol_tx_desc_reset_pkt_type()
53 static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc) in ol_tx_desc_compute_delay() argument
55 if (tx_desc->entry_timestamp_ticks != 0xffffffff) { in ol_tx_desc_compute_delay()
56 ol_txrx_err("Timestamp:0x%x", tx_desc->entry_timestamp_ticks); in ol_tx_desc_compute_delay()
59 tx_desc->entry_timestamp_ticks = qdf_system_ticks(); in ol_tx_desc_compute_delay()
61 static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc) in ol_tx_desc_reset_timestamp() argument
[all …]
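
The ol_tx_desc.c lines above show the driver poisoning a descriptor's pkt_type with ol_tx_frm_freed when it is freed and checking for that sentinel before reuse. A minimal stand-alone sketch of that mark-freed/check-on-reuse pattern, with invented fake_* names and sentinel values rather than the driver's:

/* Illustrative sketch of the "mark freed, check on reuse" pattern seen in
 * ol_tx_desc_sanity_checks()/ol_tx_desc_reset_pkt_type(); the FAKE_FRM_FREED
 * sentinel and types are placeholders, not the driver's definitions. */
#include <stdio.h>

enum { FAKE_FRM_STD = 0, FAKE_FRM_FREED = 0xff };

struct fake_tx_desc { unsigned pkt_type; };

static int fake_desc_sanity_check(const struct fake_tx_desc *d)
{
    /* A descriptor coming off the free list must still carry the "freed"
     * sentinel; anything else hints at a double use or corruption. */
    if (d->pkt_type != FAKE_FRM_FREED) {
        fprintf(stderr, "tx desc in use: pkt_type %u\n", d->pkt_type);
        return -1;
    }
    return 0;
}

static void fake_desc_mark_freed(struct fake_tx_desc *d)
{
    d->pkt_type = FAKE_FRM_FREED;
}

int main(void)
{
    struct fake_tx_desc d = { .pkt_type = FAKE_FRM_STD };

    fake_desc_mark_freed(&d);
    printf("sanity: %d\n", fake_desc_sanity_check(&d)); /* 0: ok */
    return 0;
}
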
ol_tx_desc.h
101 void **td_base = (void **)pdev->tx_desc.desc_pages.cacheable_pages; in ol_tx_desc_find()
104 (td_base[tx_desc_id >> pdev->tx_desc.page_divider] + in ol_tx_desc_find()
105 (pdev->tx_desc.desc_reserved_size * in ol_tx_desc_find()
106 (tx_desc_id & pdev->tx_desc.offset_filter))))->tx_desc; in ol_tx_desc_find()
123 struct ol_tx_desc_t *tx_desc; in ol_tx_desc_find_check() local
125 if (tx_desc_id >= pdev->tx_desc.pool_size) in ol_tx_desc_find_check()
128 tx_desc = ol_tx_desc_find(pdev, tx_desc_id); in ol_tx_desc_find_check()
130 if (tx_desc->pkt_type == ol_tx_frm_freed) in ol_tx_desc_find_check()
133 return tx_desc; in ol_tx_desc_find_check()
141 struct ol_tx_desc_t *tx_desc; in ol_tx_desc_find_check() local
[all …]
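
ol_tx_desc_find() above resolves a descriptor ID against a paged pool: the high bits of the ID select a page via page_divider, the low bits select a slot via offset_filter, and the slot address is the page base plus desc_reserved_size times the offset. A minimal stand-alone sketch of that indexing scheme, using simplified fake_* types rather than the real ol_txrx_pdev_t layout:

/* Illustrative sketch of paged-pool descriptor lookup. Assumes descriptors
 * are packed into pages holding a power-of-two count each. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_desc { uint32_t id; };

struct fake_pool {
    void   **pages;            /* array of page base pointers          */
    uint16_t desc_size;        /* bytes reserved per descriptor        */
    uint16_t page_divider;     /* log2(descriptors per page)           */
    uint16_t offset_filter;    /* descriptors per page - 1 (bit mask)  */
};

static struct fake_desc *fake_desc_find(struct fake_pool *pool, uint16_t id)
{
    /* High bits select the page, low bits select the slot in the page. */
    uint8_t *page = pool->pages[id >> pool->page_divider];

    return (struct fake_desc *)
        (page + (size_t)pool->desc_size * (id & pool->offset_filter));
}

int main(void)
{
    enum { DESCS_PER_PAGE = 4, PAGES = 2 };
    struct fake_pool pool = {
        .pages = malloc(PAGES * sizeof(void *)),
        .desc_size = sizeof(struct fake_desc),
        .page_divider = 2,                    /* log2(4) */
        .offset_filter = DESCS_PER_PAGE - 1,
    };

    for (int p = 0; p < PAGES; p++)
        pool.pages[p] = calloc(DESCS_PER_PAGE, sizeof(struct fake_desc));

    /* Descriptor 5 lives in page 1, slot 1. */
    fake_desc_find(&pool, 5)->id = 5;
    printf("id=%u\n", ((struct fake_desc *)pool.pages[1])[1].id);
    return 0;
}
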
ol_tx_send.c
111 static inline void ol_tx_desc_update_comp_ts(struct ol_tx_desc_t *tx_desc) in ol_tx_desc_update_comp_ts() argument
113 tx_desc->desc_debug_info.last_comp_ts = qdf_get_log_timestamp(); in ol_tx_desc_update_comp_ts()
116 static inline void ol_tx_desc_update_comp_ts(struct ol_tx_desc_t *tx_desc) in ol_tx_desc_update_comp_ts() argument
160 struct ol_tx_desc_t *tx_desc, qdf_nbuf_t msdu) in ol_tx_send_base() argument
192 OL_TX_DESC_REF_INIT(tx_desc); in ol_tx_send_base()
193 OL_TX_DESC_REF_INC(tx_desc); in ol_tx_send_base()
194 OL_TX_DESC_REF_INC(tx_desc); in ol_tx_send_base()
201 struct ol_tx_desc_t *tx_desc, qdf_nbuf_t msdu, uint8_t vdev_id) in ol_tx_send() argument
207 msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu); in ol_tx_send()
208 id = ol_tx_desc_id(pdev, tx_desc); in ol_tx_send()
[all …]
ol_txrx_flow_control.c
144 free_desc = pdev->tx_desc.num_free; in ol_tx_get_total_free_desc()
145 qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock); in ol_tx_get_total_free_desc()
146 TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list, in ol_tx_get_total_free_desc()
152 qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock); in ol_tx_get_total_free_desc()
165 qdf_spinlock_create(&pdev->tx_desc.flow_pool_list_lock); in ol_tx_register_flow_control()
166 TAILQ_INIT(&pdev->tx_desc.flow_pool_list); in ol_tx_register_flow_control()
189 qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock); in ol_tx_deregister_flow_control()
190 while (!TAILQ_EMPTY(&pdev->tx_desc.flow_pool_list)) { in ol_tx_deregister_flow_control()
191 pool = TAILQ_FIRST(&pdev->tx_desc.flow_pool_list); in ol_tx_deregister_flow_control()
194 qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock); in ol_tx_deregister_flow_control()
[all …]
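
ol_tx_get_total_free_desc() above starts from pdev->tx_desc.num_free and walks the flow-pool list under flow_pool_list_lock; presumably each pool's available descriptors are added to the total inside the loop body that the listing truncates. A rough sketch of that pattern, with a pthread mutex and a plain linked list standing in for the qdf spinlock and TAILQ:

/* Illustrative sketch of totalling free descriptors across flow pools under
 * a list lock. Simplified types; the per-pool field name is an assumption. */
#include <pthread.h>
#include <stdio.h>

struct fake_flow_pool {
    int avail_desc;                     /* free descriptors in this pool */
    struct fake_flow_pool *next;
};

struct fake_pdev {
    int num_free;                       /* global free descriptors       */
    struct fake_flow_pool *pool_list;   /* per-vdev flow pools           */
    pthread_mutex_t pool_list_lock;
};

static int fake_total_free_desc(struct fake_pdev *pdev)
{
    int total = pdev->num_free;

    pthread_mutex_lock(&pdev->pool_list_lock);
    for (struct fake_flow_pool *p = pdev->pool_list; p; p = p->next)
        total += p->avail_desc;
    pthread_mutex_unlock(&pdev->pool_list_lock);

    return total;
}

int main(void)
{
    struct fake_flow_pool b = { .avail_desc = 8,  .next = NULL };
    struct fake_flow_pool a = { .avail_desc = 16, .next = &b };
    struct fake_pdev pdev = {
        .num_free = 100, .pool_list = &a,
        .pool_list_lock = PTHREAD_MUTEX_INITIALIZER,
    };

    printf("total free = %d\n", fake_total_free_desc(&pdev)); /* 124 */
    return 0;
}
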
ol_tx_ll_fastpath.c
152 struct ol_tx_desc_t *tx_desc = NULL; in ol_tx_prepare_ll_fast() local
158 tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info); in ol_tx_prepare_ll_fast()
159 if (qdf_unlikely(!tx_desc)) in ol_tx_prepare_ll_fast()
162 tx_desc->netbuf = msdu; in ol_tx_prepare_ll_fast()
164 tx_desc->tso_desc = msdu_info->tso_info.curr_seg; in ol_tx_prepare_ll_fast()
165 qdf_tso_seg_dbg_setowner(tx_desc->tso_desc, tx_desc); in ol_tx_prepare_ll_fast()
166 qdf_tso_seg_dbg_record(tx_desc->tso_desc, in ol_tx_prepare_ll_fast()
168 tx_desc->tso_num_desc = msdu_info->tso_info.tso_num_seg_list; in ol_tx_prepare_ll_fast()
169 tx_desc->pkt_type = OL_TX_FRM_TSO; in ol_tx_prepare_ll_fast()
172 tx_desc->pkt_type = OL_TX_FRM_STD; in ol_tx_prepare_ll_fast()
[all …]
ol_tx_ll.c
67 struct ol_tx_desc_t *tx_desc = NULL; in ol_tx_reinject() local
76 tx_desc = ol_tx_prepare_ll(vdev, msdu, &msdu_info); in ol_tx_reinject()
77 if (!tx_desc) in ol_tx_reinject()
80 HTT_TX_DESC_POSTPONED_SET(*((uint32_t *)(tx_desc->htt_tx_desc)), true); in ol_tx_reinject()
82 htt_tx_desc_set_peer_id(tx_desc->htt_tx_desc, peer_id); in ol_tx_reinject()
84 ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id); in ol_tx_reinject()
102 struct ol_tx_desc_t *tx_desc; in ol_tx_prepare_ll() local
106 tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info); in ol_tx_prepare_ll()
107 if (qdf_unlikely(!tx_desc)) { in ol_tx_prepare_ll()
120 return tx_desc; in ol_tx_prepare_ll()
[all …]
ol_tx_ll_legacy.c
72 struct ol_tx_desc_t *tx_desc = NULL; in ol_tx_ll() local
112 tx_desc = ol_tx_prepare_ll(vdev, msdu, &msdu_info); in ol_tx_ll()
113 if (!tx_desc) in ol_tx_ll()
116 ol_tx_trace_pkt(msdu, tx_desc->id, vdev->vdev_id, in ol_tx_ll()
134 htt_tx_desc_display(tx_desc->htt_tx_desc); in ol_tx_ll()
136 ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id); in ol_tx_ll()
173 struct ol_tx_desc_t *tx_desc = NULL; in ol_tx_ll() local
177 tx_desc = ol_tx_prepare_ll(vdev, msdu, &msdu_info); in ol_tx_ll()
178 if (!tx_desc) in ol_tx_ll()
181 ol_tx_trace_pkt(msdu, tx_desc->id, vdev->vdev_id, in ol_tx_ll()
[all …]
ol_tx_hl.c
139 struct ol_tx_desc_t *tx_desc = NULL; in ol_tx_hl_desc_alloc() local
143 tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info); in ol_tx_hl_desc_alloc()
149 tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info); in ol_tx_hl_desc_alloc()
153 return tx_desc; in ol_tx_hl_desc_alloc()
191 struct ol_tx_desc_t *tx_desc = in ol_tx_hl_desc_alloc() local
194 if (!tx_desc) in ol_tx_hl_desc_alloc()
201 return tx_desc; in ol_tx_hl_desc_alloc()
225 return tx_desc; in ol_tx_hl_desc_alloc()
236 struct ol_tx_desc_t *tx_desc = NULL; in ol_tx_hl_desc_alloc() local
238 tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info); in ol_tx_hl_desc_alloc()
[all …]
ol_tx_classify.h
75 struct ol_tx_desc_t *tx_desc,
82 struct ol_tx_desc_t *tx_desc,
88 #define ol_tx_classify(vdev, tx_desc, netbuf, tx_msdu_info) NULL argument
89 #define ol_tx_classify_mgmt(vdev, tx_desc, netbuf, tx_msdu_info) NULL argument
ol_txrx_encap.h
50 struct ol_tx_desc_t *tx_desc,
82 struct ol_tx_desc_t *tx_desc, in OL_TX_ENCAP() argument
86 return ol_tx_encap(vdev, tx_desc, msdu, msdu_info); in OL_TX_ENCAP()
107 #define OL_TX_ENCAP(vdev, tx_desc, msdu, msdu_info) A_OK argument
ol_txrx_encap.c
55 struct ol_tx_desc_t *tx_desc, in ol_tx_encap_from_native_wifi() argument
100 htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, in ol_tx_encap_from_native_wifi()
105 tx_desc->orig_l2_hdr_bytes = hdsize; in ol_tx_encap_from_native_wifi()
110 if (tx_desc->orig_l2_hdr_bytes) { in ol_tx_encap_from_native_wifi()
112 htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, in ol_tx_encap_from_native_wifi()
121 htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, in ol_tx_encap_from_native_wifi()
126 tx_desc->orig_l2_hdr_bytes = hdsize; in ol_tx_encap_from_native_wifi()
135 struct ol_tx_desc_t *tx_desc, in ol_tx_encap_from_8023() argument
252 htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, in ol_tx_encap_from_8023()
257 tx_desc->orig_l2_hdr_bytes = hdsize; in ol_tx_encap_from_8023()
[all …]
ol_tx.c
182 struct ol_tx_desc_t *tx_desc; in ol_txrx_mgmt_send_ext() local
240 tx_desc = ol_txrx_mgmt_tx_desc_alloc(pdev, vdev, tx_mgmt_frm, in ol_txrx_mgmt_send_ext()
242 if (!tx_desc) in ol_txrx_mgmt_send_ext()
247 tx_desc->pkt_type = type + OL_TXRX_MGMT_TYPE_BASE; in ol_txrx_mgmt_send_ext()
249 result = ol_txrx_mgmt_send_frame(vdev, tx_desc, tx_mgmt_frm, in ol_txrx_mgmt_send_ext()
ol_tx_queue.c
209 struct ol_tx_desc_t *tx_desc) in is_ol_tx_discard_frames_success() argument
214 vdev = tx_desc->vdev; in is_ol_tx_discard_frames_success()
230 discard_frames = (!ol_tx_desc_is_high_prio(tx_desc->netbuf) && in is_ol_tx_discard_frames_success()
233 (ol_tx_desc_is_high_prio(tx_desc->netbuf) && in is_ol_tx_discard_frames_success()
245 struct ol_tx_desc_t *tx_desc) in is_ol_tx_discard_frames_success() argument
256 struct ol_tx_desc_t *tx_desc, in ol_tx_enqueue() argument
269 if (is_ol_tx_discard_frames_success(pdev, tx_desc)) { in ol_tx_enqueue()
279 TAILQ_INSERT_TAIL(&txq->head, tx_desc, tx_desc_list_elem); in ol_tx_enqueue()
281 bytes = qdf_nbuf_len(tx_desc->netbuf); in ol_tx_enqueue()
290 notify_ctx.bytes = qdf_nbuf_len(tx_desc->netbuf); in ol_tx_enqueue()
[all …]
ol_txrx.c
1074 uint16_t desc_pool_size = pdev->tx_desc.pool_size; in ol_txrx_pdev_set_threshold()
1079 pdev->tx_desc.start_th = (start_threshold * desc_pool_size) / 100; in ol_txrx_pdev_set_threshold()
1080 pdev->tx_desc.stop_th = (stop_threshold * desc_pool_size) / 100; in ol_txrx_pdev_set_threshold()
1081 pdev->tx_desc.stop_priority_th = in ol_txrx_pdev_set_threshold()
1082 (TX_PRIORITY_TH * pdev->tx_desc.stop_th) / 100; in ol_txrx_pdev_set_threshold()
1083 if (pdev->tx_desc.stop_priority_th >= MAX_TSO_SEGMENT_DESC) in ol_txrx_pdev_set_threshold()
1084 pdev->tx_desc.stop_priority_th -= MAX_TSO_SEGMENT_DESC; in ol_txrx_pdev_set_threshold()
1086 pdev->tx_desc.start_priority_th = in ol_txrx_pdev_set_threshold()
1087 (TX_PRIORITY_TH * pdev->tx_desc.start_th) / 100; in ol_txrx_pdev_set_threshold()
1088 if (pdev->tx_desc.start_priority_th >= MAX_TSO_SEGMENT_DESC) in ol_txrx_pdev_set_threshold()
[all …]
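
ol_txrx_pdev_set_threshold() above derives the start/stop flow-control thresholds as percentages of the descriptor pool size, then carves priority thresholds out of those and keeps them clear of the TSO segment reservation. A small sketch of that arithmetic with placeholder constants (the real TX_PRIORITY_TH and MAX_TSO_SEGMENT_DESC values are not shown in the listing):

/* Illustrative sketch of percentage-based descriptor thresholds; constants
 * and field names are placeholders, not the driver's values. */
#include <stdint.h>
#include <stdio.h>

#define FAKE_TX_PRIORITY_TH        80   /* placeholder percentage          */
#define FAKE_MAX_TSO_SEGMENT_DESC  8    /* placeholder TSO reservation     */

struct fake_thresholds {
    uint16_t start_th, stop_th;
    uint16_t start_priority_th, stop_priority_th;
};

static void fake_set_threshold(struct fake_thresholds *t, uint16_t pool_size,
                               uint16_t start_pct, uint16_t stop_pct)
{
    t->start_th = (start_pct * pool_size) / 100;
    t->stop_th  = (stop_pct  * pool_size) / 100;

    /* Priority thresholds are a fraction of the normal ones, backed off by
     * the TSO segment reservation when large enough. */
    t->stop_priority_th = (FAKE_TX_PRIORITY_TH * t->stop_th) / 100;
    if (t->stop_priority_th >= FAKE_MAX_TSO_SEGMENT_DESC)
        t->stop_priority_th -= FAKE_MAX_TSO_SEGMENT_DESC;

    t->start_priority_th = (FAKE_TX_PRIORITY_TH * t->start_th) / 100;
    if (t->start_priority_th >= FAKE_MAX_TSO_SEGMENT_DESC)
        t->start_priority_th -= FAKE_MAX_TSO_SEGMENT_DESC;
}

int main(void)
{
    struct fake_thresholds t;

    fake_set_threshold(&t, 1024, 20, 15);
    printf("start=%u stop=%u start_prio=%u stop_prio=%u\n",
           t.start_th, t.stop_th, t.start_priority_th, t.stop_priority_th);
    return 0;
}
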
ol_tx_classify.c
46 #define OL_TX_CLASSIFY_EXTENSION(vdev, tx_desc, netbuf, msdu_info, txq) argument
47 #define OL_TX_CLASSIFY_MGMT_EXTENSION(vdev, tx_desc, netbuf, msdu_info, txq) argument
423 struct ol_tx_desc_t *tx_desc, in ol_tx_classify() argument
629 OL_TX_CLASSIFY_EXTENSION(vdev, tx_desc, tx_nbuf, tx_msdu_info, txq); in ol_tx_classify()
645 tx_desc->txq = txq; in ol_tx_classify()
654 struct ol_tx_desc_t *tx_desc, in ol_tx_classify_mgmt() argument
753 OL_TX_CLASSIFY_MGMT_EXTENSION(vdev, tx_desc, tx_nbuf, in ol_tx_classify_mgmt()
760 tx_desc->txq = txq; in ol_tx_classify_mgmt()
770 struct ol_tx_desc_t *tx_desc, in ol_tx_classify_extension() argument
881 struct ol_tx_desc_t *tx_desc, in ol_tx_classify_mgmt_extension() argument
ol_txrx_legacy_flow_control.c
161 pdev->tx_desc.pool_size + in ol_tx_get_max_to_send()
162 pdev->tx_desc.num_free); in ol_tx_get_max_to_send()
246 max_to_accept = vdev->pdev->tx_desc.num_free - in ol_tx_vdev_ll_pause_queue_send_base()
494 (pdev->tx_desc.pool_size - pdev->tx_desc.num_free); in ol_tx_pdev_ll_pause_queue_send_all()
607 if (vdev->pdev->tx_desc.num_free < (uint16_t)low_watermark) { in ol_txrx_get_tx_resource()
689 if (pdev->tx_desc.num_free > vdev->tx_fl_hwm) { in ol_tx_flow_ct_unpause_os_q()
ol_txrx_internal.h
37 #define OL_TX_DESC_NO_REFS(tx_desc) 1 argument
38 #define OL_TX_DESC_REF_INIT(tx_desc) /* no-op */ argument
39 #define OL_TX_DESC_REF_INC(tx_desc) /* no-op */ argument
41 #define OL_TX_DESC_NO_REFS(tx_desc) \ argument
42 qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
43 #define OL_TX_DESC_REF_INIT(tx_desc) qdf_atomic_init(&tx_desc->ref_cnt) argument
44 #define OL_TX_DESC_REF_INC(tx_desc) qdf_atomic_inc(&tx_desc->ref_cnt) argument
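
The ol_txrx_internal.h lines above show the OL_TX_DESC_REF_* macros compiled either to no-ops or to atomic operations on tx_desc->ref_cnt, with OL_TX_DESC_NO_REFS() true once the last reference is dropped. A small sketch of the same reference-count pattern using C11 atomics in place of qdf_atomic_t:

/* Illustrative sketch of the descriptor reference-count pattern; fake_*
 * names are placeholders, not the driver's macros. */
#include <stdatomic.h>
#include <stdio.h>

struct fake_tx_desc { atomic_int ref_cnt; };

#define FAKE_REF_INIT(d)  atomic_init(&(d)->ref_cnt, 0)
#define FAKE_REF_INC(d)   atomic_fetch_add(&(d)->ref_cnt, 1)
/* Mirrors qdf_atomic_dec_and_test(): true when the decrement leaves the
 * count at zero, i.e. the last holder just dropped its reference. */
#define FAKE_NO_REFS(d)   (atomic_fetch_sub(&(d)->ref_cnt, 1) == 1)

int main(void)
{
    struct fake_tx_desc desc;

    FAKE_REF_INIT(&desc);
    FAKE_REF_INC(&desc);     /* caller reference      */
    FAKE_REF_INC(&desc);     /* completion reference  */

    printf("%d\n", FAKE_NO_REFS(&desc));  /* 0: one reference still held */
    printf("%d\n", FAKE_NO_REFS(&desc));  /* 1: last reference dropped   */
    return 0;
}
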
/wlan-driver/qca-wifi-host-cmn/dp/wifi3.0/li/
dp_li_tx.c
92 struct dp_tx_desc_s *tx_desc, in dp_tx_process_htt_completion_li() argument
124 if (qdf_unlikely(!tx_desc->flags)) { in dp_tx_process_htt_completion_li()
126 tx_desc->id); in dp_tx_process_htt_completion_li()
130 if (qdf_unlikely(tx_desc->vdev_id == DP_INVALID_VDEV_ID)) { in dp_tx_process_htt_completion_li()
131 dp_tx_comp_info_rl("Invalid vdev_id %d", tx_desc->id); in dp_tx_process_htt_completion_li()
132 tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR; in dp_tx_process_htt_completion_li()
136 pdev = tx_desc->pdev; in dp_tx_process_htt_completion_li()
140 tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR; in dp_tx_process_htt_completion_li()
144 if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) { in dp_tx_process_htt_completion_li()
145 dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id); in dp_tx_process_htt_completion_li()
[all …]
dp_li_tx.h
41 struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
69 struct dp_tx_desc_s *tx_desc,
/wlan-driver/qca-wifi-host-cmn/dp/wifi3.0/rh/
dp_rh_tx.c
98 struct dp_tx_desc_s *tx_desc; in dp_tx_comp_find_tx_desc_rh() local
104 tx_desc = dp_tx_desc_find(soc, pool_id, in dp_tx_comp_find_tx_desc_rh()
110 if (tx_desc && tx_desc->pool_id != pool_id) { in dp_tx_comp_find_tx_desc_rh()
112 pool_id, tx_desc->pool_id); in dp_tx_comp_find_tx_desc_rh()
117 return tx_desc; in dp_tx_comp_find_tx_desc_rh()
121 struct dp_tx_desc_s *tx_desc, in dp_tx_process_htt_completion_rh() argument
277 struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata, in dp_tx_hw_enqueue_rh() argument
284 qdf_nbuf_t nbuf = tx_desc->nbuf; in dp_tx_hw_enqueue_rh()
302 if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) { in dp_tx_hw_enqueue_rh()
303 dp_err_rl("Invalid tx desc id:%d", tx_desc->id); in dp_tx_hw_enqueue_rh()
[all …]
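
dp_tx_comp_find_tx_desc_rh() above looks a descriptor up by pool ID and index and then rejects it if the descriptor's recorded pool_id disagrees with the pool the completion claimed. A rough sketch of that defensive cross-check, with an invented fake_desc_lookup() helper rather than the real dp_tx_desc_find() signature:

/* Illustrative sketch of validating a descriptor resolved from a completion
 * cookie against its expected pool; fake_* names are placeholders. */
#include <stdint.h>
#include <stdio.h>

struct fake_tx_desc { uint8_t pool_id; };

/* Stand-in for the real pool lookup; here a single static pool of four. */
static struct fake_tx_desc g_pool0[4];

static struct fake_tx_desc *fake_desc_lookup(uint8_t pool_id, uint16_t idx)
{
    return (pool_id == 0 && idx < 4) ? &g_pool0[idx] : NULL;
}

static struct fake_tx_desc *fake_comp_find_desc(uint8_t pool_id, uint16_t idx)
{
    struct fake_tx_desc *d = fake_desc_lookup(pool_id, idx);

    /* A stale or corrupted cookie can resolve to a descriptor owned by a
     * different pool; treat that as "not found" rather than using it. */
    if (d && d->pool_id != pool_id) {
        fprintf(stderr, "pool mismatch: want %u got %u\n", pool_id, d->pool_id);
        return NULL;
    }
    return d;
}

int main(void)
{
    printf("%s\n", fake_comp_find_desc(0, 2) ? "found" : "missing");
    return 0;
}
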
/wlan-driver/qca-wifi-host-cmn/dp/wifi3.0/be/
dp_be_tx.c
263 struct dp_tx_desc_s *tx_desc, in dp_tx_process_htt_completion_be() argument
297 if (qdf_unlikely(!tx_desc->flags)) { in dp_tx_process_htt_completion_be()
299 tx_desc->id); in dp_tx_process_htt_completion_be()
303 if (qdf_unlikely(tx_desc->vdev_id == DP_INVALID_VDEV_ID)) { in dp_tx_process_htt_completion_be()
304 dp_tx_comp_info_rl("Invalid vdev_id %d", tx_desc->id); in dp_tx_process_htt_completion_be()
305 tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR; in dp_tx_process_htt_completion_be()
309 pdev = tx_desc->pdev; in dp_tx_process_htt_completion_be()
313 tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR; in dp_tx_process_htt_completion_be()
317 if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) { in dp_tx_process_htt_completion_be()
318 dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id); in dp_tx_process_htt_completion_be()
[all …]
/wlan-driver/qca-wifi-host-cmn/dp/wifi3.0/
dp_tx.c
314 struct dp_tx_desc_s *tx_desc) in dp_tx_tso_desc_release() argument
317 if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_desc)) { in dp_tx_tso_desc_release()
320 } else if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_num_desc)) { in dp_tx_tso_desc_release()
325 (struct qdf_tso_num_seg_elem_t *)tx_desc-> in dp_tx_tso_desc_release()
330 dp_tso_num_seg_free(soc, tx_desc->pool_id, in dp_tx_tso_desc_release()
331 tx_desc->msdu_ext_desc-> in dp_tx_tso_desc_release()
333 tx_desc->msdu_ext_desc->tso_num_desc = NULL; in dp_tx_tso_desc_release()
334 DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1); in dp_tx_tso_desc_release()
339 tx_desc->pool_id, tx_desc->msdu_ext_desc-> in dp_tx_tso_desc_release()
341 tx_desc->msdu_ext_desc->tso_desc = NULL; in dp_tx_tso_desc_release()
[all …]
dp_tx_desc.h
115 static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc, in dp_tx_desc_set_magic() argument
118 tx_desc->magic = magic_pattern; in dp_tx_desc_set_magic()
121 static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc, in dp_tx_desc_set_magic() argument
471 dp_tx_desc_clear(struct dp_tx_desc_s *tx_desc) in dp_tx_desc_clear() argument
473 tx_desc->vdev_id = DP_INVALID_VDEV_ID; in dp_tx_desc_clear()
474 tx_desc->nbuf = NULL; in dp_tx_desc_clear()
475 tx_desc->flags = 0; in dp_tx_desc_clear()
476 tx_desc->next = NULL; in dp_tx_desc_clear()
509 struct dp_tx_desc_s *tx_desc = pool->freelist; in dp_tx_get_desc_flow_pool() local
513 return tx_desc; in dp_tx_get_desc_flow_pool()
[all …]
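
dp_tx_desc_clear() and dp_tx_get_desc_flow_pool() above reset a descriptor to a known-idle state and pop the next descriptor off a flow pool's freelist. A minimal sketch of the two operations with simplified types and a placeholder invalid-vdev sentinel:

/* Illustrative sketch of descriptor clear and freelist pop; simplified types,
 * and FAKE_INVALID_VDEV_ID is a placeholder, not DP_INVALID_VDEV_ID. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_INVALID_VDEV_ID 0xff

struct fake_tx_desc {
    uint8_t  vdev_id;
    void    *nbuf;
    uint32_t flags;
    struct fake_tx_desc *next;
};

struct fake_flow_pool {
    struct fake_tx_desc *freelist;
    uint16_t avail_desc;
};

static void fake_desc_clear(struct fake_tx_desc *d)
{
    d->vdev_id = FAKE_INVALID_VDEV_ID;
    d->nbuf = NULL;
    d->flags = 0;
    d->next = NULL;
}

static struct fake_tx_desc *fake_get_desc(struct fake_flow_pool *pool)
{
    struct fake_tx_desc *d = pool->freelist;

    if (d) {
        pool->freelist = d->next;   /* pop the head of the freelist */
        pool->avail_desc--;
    }
    return d;
}

int main(void)
{
    struct fake_tx_desc d1 = { 0 }, d0 = { .next = &d1 };
    struct fake_flow_pool pool = { .freelist = &d0, .avail_desc = 2 };
    struct fake_tx_desc *d = fake_get_desc(&pool);

    fake_desc_clear(d);
    printf("avail=%u vdev=%u\n", pool.avail_desc, d->vdev_id);
    return 0;
}
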
dp_tx.h
325 void dp_tx_desc_release(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
338 void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
352 struct dp_tx_desc_s *tx_desc,
386 struct dp_tx_desc_s *tx_desc,
404 struct dp_tx_desc_s *tx_desc,
450 dp_ppeds_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc);
453 dp_ppeds_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc) in dp_ppeds_tx_desc_free() argument
1112 static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc) in dp_tx_comp_process_exception() argument
1384 struct dp_tx_desc_s *tx_desc,
1401 struct dp_tx_desc_s *tx_desc,
[all …]
dp_tx_desc.c
29 soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
36 soc->tx_desc[pool_id].page_divider = (sig_bit - 1); \
78 struct dp_tx_desc_s *tx_desc = (struct dp_tx_desc_s *)elem; in dp_tx_desc_clean_up() local
82 if (tx_desc->nbuf) { in dp_tx_desc_clean_up()
83 nbuf = dp_tx_comp_free_buf(soc, tx_desc, true); in dp_tx_desc_clean_up()
84 dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id); in dp_tx_desc_clean_up()
150 dp_global->tx_desc[soc->arch_id][pool_id] = in dp_tx_desc_pool_alloc_mem()
168 if (!dp_global->tx_desc[soc->arch_id][pool_id]) in dp_tx_desc_pool_free_mem()
171 qdf_mem_free(dp_global->tx_desc[soc->arch_id][pool_id]); in dp_tx_desc_pool_free_mem()
172 dp_global->tx_desc[soc->arch_id][pool_id] = NULL; in dp_tx_desc_pool_free_mem()
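
The dp_tx_desc.c macro above derives offset_filter and page_divider from the number of descriptors per page (a power of two): the mask is num_desc_per_page - 1 and the shift is the index of its highest set bit. A small sketch of that derivation; a descriptor ID then splits into a page index (id >> page_divider) and an in-page slot (id & offset_filter):

/* Illustrative sketch of computing the page divider and offset mask for a
 * paged descriptor pool; fake_* names are placeholders. */
#include <stdint.h>
#include <stdio.h>

static void fake_pool_set_dividers(uint16_t num_desc_per_page,
                                   uint16_t *page_divider,
                                   uint16_t *offset_filter)
{
    uint16_t sig_bit = 0;

    /* Find the position past the highest set bit; num_desc_per_page is
     * assumed to be a power of two. */
    while (num_desc_per_page >> sig_bit)
        sig_bit++;

    *offset_filter = num_desc_per_page - 1;
    *page_divider  = sig_bit - 1;
}

int main(void)
{
    uint16_t divider, filter;

    fake_pool_set_dividers(64, &divider, &filter);
    printf("page_divider=%u offset_filter=0x%x\n", divider, filter); /* 6, 0x3f */
    return 0;
}
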
