Lines matching refs: tx_desc
(each hit: source line number, matching code, enclosing function, and whether tx_desc is an argument or a local there)

314 				   struct dp_tx_desc_s *tx_desc)  in dp_tx_tso_desc_release()  argument
317 if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_desc)) { in dp_tx_tso_desc_release()
320 } else if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_num_desc)) { in dp_tx_tso_desc_release()
325 (struct qdf_tso_num_seg_elem_t *)tx_desc-> in dp_tx_tso_desc_release()
330 dp_tso_num_seg_free(soc, tx_desc->pool_id, in dp_tx_tso_desc_release()
331 tx_desc->msdu_ext_desc-> in dp_tx_tso_desc_release()
333 tx_desc->msdu_ext_desc->tso_num_desc = NULL; in dp_tx_tso_desc_release()
334 DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1); in dp_tx_tso_desc_release()
339 tx_desc->pool_id, tx_desc->msdu_ext_desc-> in dp_tx_tso_desc_release()
341 tx_desc->msdu_ext_desc->tso_desc = NULL; in dp_tx_tso_desc_release()
354 struct dp_tx_desc_s *tx_desc) in dp_tx_tso_desc_release() argument
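
The hits at 314-341 fall inside the body of dp_tx_tso_desc_release(); the second signature at 354 reads like the stub compiled when TSO support is off. The visible teardown order: validate tso_desc and tso_num_desc, free the shared num-seg element through tx_desc->pool_id once its segments drain, then clear the per-segment tso_desc. A minimal compilable sketch of that order, using simplified stand-in types rather than the driver's qdf_tso_* structs:

/* Stand-in types: the driver uses struct dp_tx_desc_s whose msdu_ext_desc
 * points at qdf_tso_seg_elem_t / qdf_tso_num_seg_elem_t elements. */
struct tso_num_seg { int num_seg_in_use; };       /* invented field name */
struct msdu_ext {
        void *tso_desc;                   /* per-segment element */
        struct tso_num_seg *tso_num_desc; /* shared across all segments */
};
struct tx_desc { unsigned char pool_id; struct msdu_ext *msdu_ext_desc; };

static void tso_desc_release(struct tx_desc *d)
{
        struct msdu_ext *ext = d->msdu_ext_desc;

        if (!ext->tso_desc || !ext->tso_num_desc)
                return;                   /* corrupt state; driver logs it */

        if (--ext->tso_num_desc->num_seg_in_use == 0)
                /* last segment: dp_tso_num_seg_free(soc, pool_id, ...) */
                ext->tso_num_desc = NULL;

        /* dp_tx_tso_desc_free(soc, pool_id, ext->tso_desc), line 339 */
        ext->tso_desc = NULL;
}
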
361 dp_tx_release_ds_tx_desc(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc, in dp_tx_release_ds_tx_desc() argument
364 if (tx_desc->flags & DP_TX_DESC_FLAG_PPEDS) { in dp_tx_release_ds_tx_desc()
366 dp_tx_desc_free(soc, tx_desc, desc_pool_id); in dp_tx_release_ds_tx_desc()
375 dp_tx_release_ds_tx_desc(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc, in dp_tx_release_ds_tx_desc() argument
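
dp_tx_release_ds_tx_desc() likewise appears twice (361 and 375), which looks like the usual real-body/stub split behind a feature ifdef. The real body frees PPE direct-switch descriptors on the spot when DP_TX_DESC_FLAG_PPEDS is set (364-366) and tells the caller to skip the regular release path. A hedged sketch, with an illustrative flag value:

#include <stdbool.h>

#define DP_TX_DESC_FLAG_PPEDS (1u << 9)   /* illustrative bit, not the real one */

struct tx_desc { unsigned int flags; };

/* Returns true when the descriptor was a PPE-DS one and has already been
 * freed, so dp_tx_desc_release() can return early (see line 389). */
static bool release_ds_tx_desc(struct tx_desc *d)
{
        if (d->flags & DP_TX_DESC_FLAG_PPEDS) {
                /* dp_tx_desc_free(soc, d, desc_pool_id) in the driver */
                return true;
        }
        return false;
}
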
383 dp_tx_desc_release(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc, in dp_tx_desc_release() argument
386 struct dp_pdev *pdev = tx_desc->pdev; in dp_tx_desc_release()
389 if (dp_tx_release_ds_tx_desc(soc, tx_desc, desc_pool_id)) in dp_tx_desc_release()
398 if (tx_desc->msdu_ext_desc) { in dp_tx_desc_release()
399 if (tx_desc->frm_type == dp_tx_frm_tso) in dp_tx_desc_release()
400 dp_tx_tso_desc_release(soc, tx_desc); in dp_tx_desc_release()
402 if (tx_desc->flags & DP_TX_DESC_FLAG_ME) in dp_tx_desc_release()
403 dp_tx_me_free_buf(tx_desc->pdev, in dp_tx_desc_release()
404 tx_desc->msdu_ext_desc->me_buffer); in dp_tx_desc_release()
406 dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id); in dp_tx_desc_release()
408 tx_desc->msdu_ext_desc = NULL; in dp_tx_desc_release()
411 if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) in dp_tx_desc_release()
415 tx_desc->buffer_src) in dp_tx_desc_release()
416 comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp, in dp_tx_desc_release()
422 tx_desc->id, comp_status, in dp_tx_desc_release()
426 if (tx_desc->flags & DP_TX_DESC_FLAG_SPECIAL) in dp_tx_desc_release()
427 dp_tx_spcl_desc_free(soc, tx_desc, desc_pool_id); in dp_tx_desc_release()
429 dp_tx_desc_free(soc, tx_desc, desc_pool_id); in dp_tx_desc_release()
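
Lines 383-429 give dp_tx_desc_release() its overall shape: the PPE-DS early-out first (389), then extension-descriptor cleanup (TSO state for dp_tx_frm_tso frames, the me_buffer when DP_TX_DESC_FLAG_ME is set), then completion-reason bookkeeping for FW-bound frames, and finally a free into either the special or the regular pool. A condensed, compilable outline with stand-in flag values and no-op helpers in place of the driver calls:

#include <stdbool.h>
#include <stddef.h>

#define FLAG_ME      (1u << 0)   /* illustrative flag values */
#define FLAG_TO_FW   (1u << 1)
#define FLAG_SPECIAL (1u << 2)
enum { FRM_STD, FRM_TSO };

struct tx_desc { unsigned int flags; int frm_type; void *msdu_ext_desc; };

/* No-op stand-ins for the driver helpers named in the listing. */
static void tso_release(struct tx_desc *d)        { (void)d; }
static void me_free_buf(struct tx_desc *d)        { (void)d; }
static void ext_desc_free(struct tx_desc *d)      { (void)d; }
static void pool_free(struct tx_desc *d, bool sp) { (void)d; (void)sp; }

static void desc_release(struct tx_desc *d)
{
        /* PPE-DS descriptors were already freed by release_ds_tx_desc() */

        /* unwind the extension descriptor before freeing the main one */
        if (d->msdu_ext_desc) {
                if (d->frm_type == FRM_TSO)
                        tso_release(d);   /* dp_tx_tso_desc_release, 400 */
                if (d->flags & FLAG_ME)
                        me_free_buf(d);   /* dp_tx_me_free_buf, 403 */
                ext_desc_free(d);         /* dp_tx_ext_desc_free, 406 */
                d->msdu_ext_desc = NULL;
        }

        /* FW-bound frames record an HTT completion reason (411-422) */

        pool_free(d, d->flags & FLAG_SPECIAL);  /* special vs regular pool */
}

The point worth keeping is the ordering: the extension descriptor must be unwound before the main descriptor returns to its pool.
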
1023 dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc, in dp_tx_traffic_end_indication_set_desc_flag() argument
1030 tx_desc->flags |= DP_TX_DESC_FLAG_TRAFFIC_END_IND; in dp_tx_traffic_end_indication_set_desc_flag()
1108 dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc, in dp_tx_traffic_end_indication_set_desc_flag() argument
1177 struct dp_tx_desc_s *tx_desc; in dp_tx_prepare_desc_single() local
1187 tx_desc = dp_tx_spcl_desc_alloc(soc, desc_pool_id); in dp_tx_prepare_desc_single()
1189 tx_desc = dp_tx_desc_alloc(soc, desc_pool_id); in dp_tx_prepare_desc_single()
1191 if (qdf_unlikely(!tx_desc)) { in dp_tx_prepare_desc_single()
1203 tx_desc->nbuf = nbuf; in dp_tx_prepare_desc_single()
1204 tx_desc->frm_type = dp_tx_frm_std; in dp_tx_prepare_desc_single()
1205 tx_desc->tx_encap_type = ((tx_exc_metadata && in dp_tx_prepare_desc_single()
1208 tx_desc->vdev_id = vdev->vdev_id; in dp_tx_prepare_desc_single()
1209 tx_desc->pdev = pdev; in dp_tx_prepare_desc_single()
1210 tx_desc->msdu_ext_desc = NULL; in dp_tx_prepare_desc_single()
1211 tx_desc->pkt_offset = 0; in dp_tx_prepare_desc_single()
1212 tx_desc->length = qdf_nbuf_headlen(nbuf); in dp_tx_prepare_desc_single()
1214 dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id, in dp_tx_prepare_desc_single()
1230 return tx_desc; in dp_tx_prepare_desc_single()
1286 tx_desc->length = qdf_nbuf_headlen(nbuf); in dp_tx_prepare_desc_single()
1287 tx_desc->pkt_offset = align_pad + htt_hdr_size; in dp_tx_prepare_desc_single()
1288 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; in dp_tx_prepare_desc_single()
1289 dp_tx_traffic_end_indication_set_desc_flag(tx_desc, in dp_tx_prepare_desc_single()
1292 tx_desc->length -= tx_desc->pkt_offset; in dp_tx_prepare_desc_single()
1300 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; in dp_tx_prepare_desc_single()
1304 return tx_desc; in dp_tx_prepare_desc_single()
1307 dp_tx_desc_release(soc, tx_desc, desc_pool_id); in dp_tx_prepare_desc_single()
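
Lines 1177-1307 are dp_tx_prepare_desc_single(): allocate from the special or regular pool depending on the frame, bail if allocation fails (1191), populate the descriptor (nbuf, frm_type, encap type, vdev/pdev, zeroed pkt_offset, length from qdf_nbuf_headlen()), and release the descriptor on any later failure (1307). A small sketch of that init-then-unwind shape, with stand-in types:

#include <stddef.h>

struct nbuf;                       /* stands in for qdf_nbuf_t */
struct tx_desc {
        struct nbuf *nbuf;
        int frm_type;
        void *msdu_ext_desc;
        unsigned int pkt_offset, length;
};

static unsigned int headlen(struct nbuf *n) { (void)n; return 0; } /* stub */

static struct tx_desc *prepare_desc_single(struct nbuf *n, struct tx_desc *d)
{
        if (!d)
                return NULL;           /* alloc failure path, line 1191 */

        d->nbuf = n;
        d->frm_type = 0;               /* dp_tx_frm_std */
        d->msdu_ext_desc = NULL;       /* single-buffer frame: no ext desc */
        d->pkt_offset = 0;
        d->length = headlen(n);        /* qdf_nbuf_headlen(nbuf), line 1212 */
        /* raw/HTT frames later adjust: length -= pkt_offset (line 1292);
         * any failure past here must dp_tx_desc_release() the descriptor */
        return d;
}
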
1330 struct dp_tx_desc_s *tx_desc; in dp_tx_prepare_desc() local
1340 tx_desc = dp_tx_spcl_desc_alloc(soc, desc_pool_id); in dp_tx_prepare_desc()
1342 tx_desc = dp_tx_desc_alloc(soc, desc_pool_id); in dp_tx_prepare_desc()
1344 if (!tx_desc) { in dp_tx_prepare_desc()
1350 nbuf, tx_desc->id, DP_TX_DESC_COOKIE); in dp_tx_prepare_desc()
1355 tx_desc->nbuf = nbuf; in dp_tx_prepare_desc()
1356 tx_desc->frm_type = msdu_info->frm_type; in dp_tx_prepare_desc()
1357 tx_desc->tx_encap_type = vdev->tx_encap_type; in dp_tx_prepare_desc()
1358 tx_desc->vdev_id = vdev->vdev_id; in dp_tx_prepare_desc()
1359 tx_desc->pdev = pdev; in dp_tx_prepare_desc()
1360 tx_desc->pkt_offset = 0; in dp_tx_prepare_desc()
1362 dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id, in dp_tx_prepare_desc()
1379 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; in dp_tx_prepare_desc()
1384 tx_desc->msdu_ext_desc = msdu_ext_desc; in dp_tx_prepare_desc()
1385 tx_desc->flags |= DP_TX_DESC_FLAG_FRAG; in dp_tx_prepare_desc()
1390 tx_desc->dma_addr = msdu_ext_desc->paddr; in dp_tx_prepare_desc()
1393 tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA; in dp_tx_prepare_desc()
1395 tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES; in dp_tx_prepare_desc()
1397 return tx_desc; in dp_tx_prepare_desc()
1399 dp_tx_desc_release(soc, tx_desc, desc_pool_id); in dp_tx_prepare_desc()
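
dp_tx_prepare_desc() (1330-1399) differs from the single-buffer path by attaching an MSDU extension descriptor: dma_addr becomes the extension descriptor's physical address (1390), DP_TX_DESC_FLAG_FRAG is set, and length is a fixed extension-descriptor size, larger when HTT metadata rides along (1393 vs 1395). Sketch, with illustrative sizes in place of the HAL constants:

#include <stdint.h>

#define EXT_DESC_LEN            80u   /* illustrative sizes; the driver uses  */
#define EXT_DESC_WITH_META_LEN 128u   /* HAL_TX_EXTENSION_DESC_LEN_BYTES etc. */
#define FLAG_FRAG (1u << 3)           /* illustrative bit */

struct ext_desc { uint64_t paddr; };
struct tx_desc {
        unsigned int flags, length;
        uint64_t dma_addr;
        struct ext_desc *msdu_ext_desc;
};

static void attach_ext_desc(struct tx_desc *d, struct ext_desc *e, int meta)
{
        d->msdu_ext_desc = e;
        d->flags |= FLAG_FRAG;        /* HW reads frags from the ext desc */
        d->dma_addr = e->paddr;       /* line 1390 */
        d->length = meta ? EXT_DESC_WITH_META_LEN : EXT_DESC_LEN;
}
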
1553 struct dp_tx_desc_s *tx_desc, in dp_tx_update_stats() argument
1556 uint32_t stats_len = dp_tx_get_pkt_len(tx_desc); in dp_tx_update_stats()
1563 struct dp_tx_desc_s *tx_desc, in dp_tx_attempt_coalescing() argument
1577 tcl_data.nbuf = tx_desc->nbuf; in dp_tx_attempt_coalescing()
1580 tcl_data.pkt_len = dp_tx_get_pkt_len(tx_desc); in dp_tx_attempt_coalescing()
1891 struct dp_tx_desc_s *tx_desc) in dp_tx_update_tdls_flags() argument
1895 tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME; in dp_tx_update_tdls_flags()
1938 struct dp_tx_desc_s *tx_desc) in dp_non_std_htt_tx_comp_free_buff() argument
1943 qdf_nbuf_t nbuf = tx_desc->nbuf; in dp_non_std_htt_tx_comp_free_buff()
1944 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id, in dp_non_std_htt_tx_comp_free_buff()
1952 hal_tx_comp_get_htt_desc(&tx_desc->comp, htt_tx_status); in dp_non_std_htt_tx_comp_free_buff()
1954 dp_debug("vdev_id: %d tx_status: %d", tx_desc->vdev_id, tx_status); in dp_non_std_htt_tx_comp_free_buff()
1985 struct dp_tx_desc_s *tx_desc, in dp_tx_msdu_single_map() argument
1988 if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME))) in dp_tx_msdu_single_map()
2000 struct dp_tx_desc_s *tx_desc) in dp_tx_update_tdls_flags() argument
2005 struct dp_tx_desc_s *tx_desc) in dp_non_std_htt_tx_comp_free_buff() argument
2010 struct dp_tx_desc_s *tx_desc, in dp_tx_msdu_single_map() argument
2022 struct dp_tx_desc_s *tx_desc, in dp_tx_nbuf_map_regular() argument
2027 ret = dp_tx_msdu_single_map(vdev, tx_desc, nbuf); in dp_tx_nbuf_map_regular()
2082 struct dp_tx_desc_s *tx_desc) in dp_tx_rmnet_nbuf_map() argument
2087 tx_desc->length = msdu_info->buf_len; in dp_tx_rmnet_nbuf_map()
2093 tx_desc->flags |= DP_TX_DESC_FLAG_RMNET; in dp_tx_rmnet_nbuf_map()
2105 struct dp_tx_desc_s *tx_desc) in dp_tx_rmnet_nbuf_map() argument
2114 struct dp_tx_desc_s *tx_desc, in dp_tx_nbuf_map() argument
2117 if (qdf_likely(tx_desc->flags & DP_TX_DESC_FLAG_FAST)) { in dp_tx_nbuf_map()
2122 return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf); in dp_tx_nbuf_map()
2137 struct dp_tx_desc_s *tx_desc, in dp_tx_nbuf_map() argument
2140 return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf); in dp_tx_nbuf_map()
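
Lines 1985-2140 cover the DMA-map helpers and their dispatch: descriptors flagged DP_TX_DESC_FLAG_FAST take the cheap map path (2117), rmnet frames go through dp_tx_rmnet_nbuf_map(), which takes the length from msdu_info->buf_len and tags DP_TX_DESC_FLAG_RMNET (2087, 2093), and everything else falls back to dp_tx_nbuf_map_regular() / dp_tx_msdu_single_map(), where the non-TDLS case is the qdf_likely() branch (1988). A sketch of the dispatch with stand-in stubs:

#include <stdint.h>

#define FLAG_FAST  (1u << 4)   /* illustrative bits */
#define FLAG_RMNET (1u << 5)

struct nbuf;
struct tx_desc { unsigned int flags, length; };

static uint64_t map_fast(struct nbuf *n)    { (void)n; return 1; } /* stubs */
static uint64_t map_regular(struct nbuf *n) { (void)n; return 1; }

static uint64_t nbuf_map(struct tx_desc *d, struct nbuf *n, int is_rmnet,
                         unsigned int rmnet_len)
{
        if (is_rmnet) {
                d->length = rmnet_len;  /* msdu_info->buf_len, line 2087 */
                d->flags |= FLAG_RMNET;
                return map_fast(n);
        }
        if (d->flags & FLAG_FAST)       /* line 2117 */
                return map_fast(n);
        return map_regular(n);          /* dp_tx_nbuf_map_regular */
}
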
2189 struct dp_tx_desc_s *tx_desc) in dp_tx_update_mesh_flags() argument
2192 tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE; in dp_tx_update_mesh_flags()
2204 struct dp_tx_desc_s *tx_desc, in dp_mesh_tx_comp_free_buff() argument
2207 qdf_nbuf_t nbuf = tx_desc->nbuf; in dp_mesh_tx_comp_free_buff()
2211 vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id, DP_MOD_ID_MESH); in dp_mesh_tx_comp_free_buff()
2212 if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) { in dp_mesh_tx_comp_free_buff()
2240 struct dp_tx_desc_s *tx_desc) in dp_tx_update_mesh_flags() argument
2245 struct dp_tx_desc_s *tx_desc, in dp_mesh_tx_comp_free_buff() argument
2303 dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc, in dp_tx_bypass_reinjection() argument
2310 if (!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) { in dp_tx_bypass_reinjection()
2311 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; in dp_tx_bypass_reinjection()
2339 dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc, in dp_tx_bypass_reinjection() argument
2447 struct dp_tx_desc_s *tx_desc) in dp_tx_update_ts_on_enqueued() argument
2452 tx_desc->driver_ingress_ts = msdu_info->driver_ingress_ts; in dp_tx_update_ts_on_enqueued()
2453 tx_desc->driver_egress_ts = qdf_ktime_real_get(); in dp_tx_update_ts_on_enqueued()
2504 struct dp_tx_desc_s *tx_desc, in dp_tx_latency_stats_update() argument
2518 if (!tx_desc->driver_ingress_ts || !tx_desc->driver_egress_ts) in dp_tx_latency_stats_update()
2525 ingress = qdf_ktime_to_us(tx_desc->driver_ingress_ts); in dp_tx_latency_stats_update()
2526 egress = qdf_ktime_to_us(tx_desc->driver_egress_ts); in dp_tx_latency_stats_update()
3002 struct dp_tx_desc_s *tx_desc) in dp_tx_update_ts_on_enqueued() argument
3009 struct dp_tx_desc_s *tx_desc, in dp_tx_latency_stats_update() argument
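
Lines 2447-2526 pair enqueue-time stamping with completion-time accounting: dp_tx_update_ts_on_enqueued() copies the ingress timestamp from msdu_info and records egress via qdf_ktime_real_get() (2452-2453); dp_tx_latency_stats_update() later skips the sample if either stamp is zero (2518) and otherwise converts both to microseconds (2525-2526). A sketch of the arithmetic, storing plain microsecond values instead of ktime_t:

#include <stdint.h>

struct tx_desc { int64_t ingress_us, egress_us; };

/* At enqueue: capture both stamps (lines 2452-2453). */
static void stamp_on_enqueued(struct tx_desc *d, int64_t ingress_us,
                              int64_t now_us)
{
        d->ingress_us = ingress_us;   /* msdu_info->driver_ingress_ts */
        d->egress_us = now_us;        /* qdf_ktime_real_get() */
}

/* At completion: driver-stage latency; zero stamps mean "not tracked". */
static int64_t driver_latency_us(const struct tx_desc *d)
{
        if (!d->ingress_us || !d->egress_us)
                return -1;            /* line 2518: skip the sample */
        return d->egress_us - d->ingress_us;
}
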
3023 struct dp_tx_desc_s *tx_desc; in dp_tx_send_msdu_single() local
3033 tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id, in dp_tx_send_msdu_single()
3035 if (!tx_desc) { in dp_tx_send_msdu_single()
3042 dp_tx_update_tdls_flags(soc, vdev, tx_desc); in dp_tx_send_msdu_single()
3052 dp_tx_bypass_reinjection(soc, tx_desc, tx_exc_metadata); in dp_tx_send_msdu_single()
3059 dp_tx_desc_update_fast_comp_flag(soc, tx_desc, in dp_tx_send_msdu_single()
3062 dp_tx_update_mesh_flags(soc, vdev, tx_desc); in dp_tx_send_msdu_single()
3065 paddr = dp_tx_rmnet_nbuf_map(msdu_info, tx_desc); in dp_tx_send_msdu_single()
3067 paddr = dp_tx_nbuf_map(vdev, tx_desc, nbuf); in dp_tx_send_msdu_single()
3078 tx_desc->dma_addr = paddr; in dp_tx_send_msdu_single()
3079 dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf, in dp_tx_send_msdu_single()
3080 tx_desc->id, DP_TX_DESC_MAP); in dp_tx_send_msdu_single()
3083 status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc, in dp_tx_send_msdu_single()
3089 tx_desc, tx_q->ring_id); in dp_tx_send_msdu_single()
3090 dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf, in dp_tx_send_msdu_single()
3091 tx_desc->id, DP_TX_DESC_UNMAP); in dp_tx_send_msdu_single()
3092 dp_tx_nbuf_unmap(soc, tx_desc); in dp_tx_send_msdu_single()
3097 dp_tx_update_ts_on_enqueued(vdev, msdu_info, tx_desc); in dp_tx_send_msdu_single()
3103 dp_tx_desc_release(soc, tx_desc, tx_q->desc_pool_id); in dp_tx_send_msdu_single()
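
Lines 3023-3103 show the single-MSDU transmit path end to end: prepare the descriptor (3033), apply per-mode flag fixups (TDLS 3042, FW-reinjection bypass 3052, mesh 3062), map the buffer (3065/3067), hand it to the arch-specific tx_hw_enqueue (3083), and on enqueue failure unmap before releasing (3090-3092 precede 3103). A sketch of that unwind order, all helpers stubbed:

#include <stdint.h>
#include <stddef.h>

struct nbuf;
struct tx_desc { uint64_t dma_addr; };

static struct tx_desc *prepare(struct nbuf *n)  { (void)n; return NULL; }
static uint64_t map(struct nbuf *n)             { (void)n; return 0; }
static int hw_enqueue(struct tx_desc *d)        { (void)d; return -1; }
static void unmap(struct tx_desc *d)            { (void)d; }
static void release(struct tx_desc *d)          { (void)d; }

static struct nbuf *send_msdu_single(struct nbuf *n)
{
        struct tx_desc *d = prepare(n);    /* dp_tx_prepare_desc_single */
        uint64_t paddr;

        if (!d)
                return n;                  /* caller frees the nbuf */

        /* flag fixups: TDLS, FW bypass, mesh (lines 3042-3062) */

        paddr = map(n);                    /* dp_tx_nbuf_map */
        if (!paddr)
                goto release_desc;
        d->dma_addr = paddr;

        if (hw_enqueue(d)) {               /* soc->arch_ops.tx_hw_enqueue */
                unmap(d);                  /* undo the DMA map first */
                goto release_desc;
        }
        return NULL;                       /* enqueued; HW owns the nbuf */

release_desc:
        release(d);                        /* dp_tx_desc_release */
        return n;
}
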
3253 struct dp_tx_desc_s *tx_desc; in dp_tx_send_msdu_multiple() local
3275 tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info, in dp_tx_send_msdu_multiple()
3278 if (!tx_desc) { in dp_tx_send_msdu_multiple()
3339 tx_desc->msdu_ext_desc->me_buffer = in dp_tx_send_msdu_multiple()
3342 tx_desc->flags |= DP_TX_DESC_FLAG_ME; in dp_tx_send_msdu_multiple()
3346 tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; in dp_tx_send_msdu_multiple()
3386 status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc, in dp_tx_send_msdu_multiple()
3394 tx_desc, tx_q->ring_id); in dp_tx_send_msdu_multiple()
3421 dp_tx_desc_release(soc, tx_desc, in dp_tx_send_msdu_multiple()
3446 dp_tx_comp_free_buf(soc, tx_desc, false); in dp_tx_send_msdu_multiple()
3448 dp_tx_desc_release(soc, tx_desc, in dp_tx_send_msdu_multiple()
3456 dp_tx_desc_release(soc, tx_desc, tx_q->desc_pool_id); in dp_tx_send_msdu_multiple()
3460 dp_tx_update_ts_on_enqueued(vdev, msdu_info, tx_desc); in dp_tx_send_msdu_multiple()
4538 struct dp_tx_desc_s *tx_desc, in dp_tx_reinject_mlo_hdl() argument
4546 dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id); in dp_tx_reinject_mlo_hdl()
4555 struct dp_tx_desc_s *tx_desc, in dp_tx_reinject_mlo_hdl() argument
4565 struct dp_tx_desc_s *tx_desc, in dp_tx_reinject_mlo_hdl() argument
4575 struct dp_tx_desc_s *tx_desc, argument
4581 qdf_nbuf_t nbuf = tx_desc->nbuf;
4598 qdf_nbuf_len(tx_desc->nbuf));
4600 if (dp_tx_reinject_mlo_hdl(soc, vdev, tx_desc, nbuf, reinject_reason))
4634 DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
4692 dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
4697 struct dp_tx_desc_s *tx_desc, argument
4700 uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(tx_desc->nbuf);
4706 qdf_nbuf_len(tx_desc->nbuf));
4708 DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
4709 dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
4721 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc, argument
4724 qdf_nbuf_t netbuf = tx_desc->nbuf;
4726 if (!tx_desc->msdu_ext_desc) {
4727 if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
4730 netbuf, tx_desc->pkt_offset);
4738 void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc, argument
4749 struct dp_tx_desc_s *tx_desc, argument
4753 dp_sawf_tx_compl_update_peer_stats(soc, vdev, txrx_peer, tx_desc,
4771 struct dp_tx_desc_s *tx_desc, argument
4788 struct dp_tx_desc_s *tx_desc, argument
4800 timestamp_ingress = qdf_nbuf_get_timestamp_us(tx_desc->nbuf);
4801 timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
4829 struct dp_tx_desc_s *tx_desc, argument
4838 timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
4839 timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
4865 struct dp_tx_desc_s *tx_desc, argument
4891 tx_desc, ts, txrx_peer->vdev);
4896 struct dp_tx_desc_s *tx_desc, argument
4973 struct dp_tx_desc_s *tx_desc) argument
4988 struct dp_tx_desc_s *tx_desc) argument
4993 timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
5011 struct dp_tx_desc_s *tx_desc) argument
5023 tx_desc);
5050 struct dp_tx_desc_s *tx_desc, argument
5077 ts, txrx_peer->vdev, tx_desc);
5081 struct dp_tx_desc_s *tx_desc, argument
5123 void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc, argument
5137 qdf_ktime_to_us(tx_desc->timestamp);
5153 timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
5166 timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
5341 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc, argument
5366 length = qdf_nbuf_len(tx_desc->nbuf);
5371 dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id);
5413 dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer, link_id);
5476 struct dp_tx_desc_s *tx_desc) argument
5481 desc_pool_id = tx_desc->pool_id;
5482 pool = &soc->tx_desc[desc_pool_id];
5496 struct dp_tx_desc_s *tx_desc) argument
5501 desc_pool_id = tx_desc->pool_id;
5502 pool = &soc->tx_desc[desc_pool_id];
5508 void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc) argument
5513 void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc) argument
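
Lines 5476-5513 are the flow-control variants of the pool lock helpers: tx_desc->pool_id indexes soc->tx_desc[] (5481-5482), and the lock taken is that pool's flow_pool_lock, created at 7314 and destroyed at 7352; the non-flow-control variants at 5508-5513 are empty stubs. A sketch of the indexing, with a stand-in lock type:

#define MAX_POOLS 4                 /* stand-in for the driver's pool count */

struct pool { int flow_pool_lock; };   /* stands in for qdf_spinlock_t */
struct soc  { struct pool tx_desc[MAX_POOLS]; };
struct tx_desc { unsigned char pool_id; };

static struct pool *desc_pool(struct soc *s, struct tx_desc *d)
{
        return &s->tx_desc[d->pool_id];   /* lines 5481-5482 */
}
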
5530 struct dp_tx_desc_s *tx_desc, argument
5538 qdf_assert(tx_desc);
5711 struct dp_tx_desc_s *tx_desc, argument
5719 struct dp_tx_desc_s *tx_desc, argument
5726 qdf_assert(tx_desc);
5735 stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
5737 stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
6039 struct dp_tx_desc_s *tx_desc, argument
6047 qdf_nbuf_t nbuf = tx_desc->nbuf;
6058 length = dp_tx_get_pkt_len(tx_desc);
6103 vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
6119 dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
6124 dp_tx_notify_completion(soc, vdev, tx_desc,
6129 !(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
6130 dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
6164 dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id, link_id);
6165 dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts, ring_id);
6166 dp_tx_update_peer_jitter_stats(txrx_peer, tx_desc, ts, ring_id);
6167 dp_tx_update_peer_sawf_stats(soc, vdev, txrx_peer, tx_desc,
6169 dp_tx_send_pktlog(soc, vdev->pdev, tx_desc, nbuf, dp_status);
6170 dp_tx_latency_stats_update(soc, txrx_peer, tx_desc, ts, link_id);
6175 qdf_ktime_to_ms(tx_desc->timestamp),
6180 DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
6185 tx_desc->id, ts->status, dp_status, op_mode));
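
Lines 6039-6185 are the per-frame completion fan-out in the processing routine: look up the vdev by tx_desc->vdev_id (6103), then feed the same descriptor and completion status through connectivity stats, completion notification, peer stats, delay/jitter, SAWF, pktlog, and latency stats in that order (6119-6170). A condensed sketch of the call sequence with no-op sinks:

struct tx_desc;

/* Stand-ins for the stats sinks named at lines 6164-6170. */
static void peer_stats(struct tx_desc *d)   { (void)d; }
static void delay_stats(struct tx_desc *d)  { (void)d; }
static void jitter_stats(struct tx_desc *d) { (void)d; }
static void sawf_stats(struct tx_desc *d)   { (void)d; }
static void pktlog(struct tx_desc *d)       { (void)d; }
static void latency(struct tx_desc *d)      { (void)d; }

static void process_tx_status(struct tx_desc *d)
{
        /* vdev/peer lookups and early-outs elided */
        peer_stats(d);     /* dp_tx_update_peer_stats, 6164 */
        delay_stats(d);    /* dp_tx_update_peer_delay_stats, 6165 */
        jitter_stats(d);   /* dp_tx_update_peer_jitter_stats, 6166 */
        sawf_stats(d);     /* dp_tx_update_peer_sawf_stats, 6167 */
        pktlog(d);         /* dp_tx_send_pktlog, 6169 */
        latency(d);        /* dp_tx_latency_stats_update, 6170 */
}
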
6534 void dp_tx_dump_tx_desc(struct dp_tx_desc_s *tx_desc) argument
6536 if (tx_desc) {
6537 dp_tx_comp_warn("tx_desc->nbuf: %pK", tx_desc->nbuf);
6538 dp_tx_comp_warn("tx_desc->flags: 0x%x", tx_desc->flags);
6539 dp_tx_comp_warn("tx_desc->id: %u", tx_desc->id);
6541 (unsigned int)tx_desc->dma_addr);
6543 tx_desc->vdev_id);
6545 tx_desc->tx_status);
6547 tx_desc->pdev);
6549 tx_desc->tx_encap_type);
6551 tx_desc->buffer_src);
6553 tx_desc->frm_type);
6555 tx_desc->pkt_offset);
6557 tx_desc->pool_id);
6626 void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc) argument
6628 if ((tx_desc->magic != DP_TX_MAGIC_PATTERN_INUSE) &&
6629 (tx_desc->magic != DP_TX_MAGIC_PATTERN_FREE)) {
6630 dp_err_rl("tx_desc %u is corrupted", tx_desc->id);
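
dp_tx_desc_check_corruption() (6626-6630) is a plain magic-pattern check: a descriptor must carry either the in-use or the free pattern, and anything else is rate-limit-logged as corruption. Sketch with illustrative pattern values:

#include <stdio.h>

#define MAGIC_INUSE 0xDEADBEEFu   /* illustrative; the driver defines   */
#define MAGIC_FREE  0xFEEDBEEFu   /* DP_TX_MAGIC_PATTERN_INUSE / _FREE  */

struct tx_desc { unsigned int magic, id; };

static void check_corruption(const struct tx_desc *d)
{
        if (d->magic != MAGIC_INUSE && d->magic != MAGIC_FREE)
                fprintf(stderr, "tx_desc %u is corrupted\n", d->id);
}
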
6711 struct dp_tx_desc_s *tx_desc = NULL; local
6821 &tx_desc);
6822 if (qdf_unlikely(!tx_desc)) {
6838 tx_desc->buffer_src = buffer_src;
6851 &tx_desc->comp, 1);
6854 tx_desc,
6857 if (qdf_unlikely(!tx_desc->pdev)) {
6858 dp_tx_dump_tx_desc(tx_desc);
6861 if (tx_desc->flags & DP_TX_DESC_FLAG_FASTPATH_SIMPLE ||
6862 tx_desc->flags & DP_TX_DESC_FLAG_PPEDS)
6865 tx_desc->tx_status =
6867 tx_desc->buffer_src = buffer_src;
6872 if (qdf_likely(tx_desc->flags &
6881 ((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
6882 !tx_desc->flags)) {
6884 tx_desc->id);
6886 dp_tx_desc_check_corruption(tx_desc);
6890 if (qdf_unlikely(!tx_desc->pdev)) {
6892 dp_tx_dump_tx_desc(tx_desc);
6897 if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
6899 tx_desc->id);
6900 tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
6901 dp_tx_comp_free_buf(soc, tx_desc, false);
6902 dp_tx_desc_release(soc, tx_desc,
6903 tx_desc->pool_id);
6907 if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
6908 !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
6910 tx_desc->flags, tx_desc->id);
6916 &tx_desc->comp, 1);
6918 DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
6923 if (tx_desc->flags & DP_TX_DESC_FLAG_FASTPATH_SIMPLE ||
6924 tx_desc->flags & DP_TX_DESC_FLAG_PPEDS) {
6925 dp_tx_nbuf_dev_queue_free(&h, tx_desc);
6928 fast_head_desc = tx_desc;
6929 fast_tail_desc = tx_desc;
6931 fast_tail_desc->next = tx_desc;
6932 fast_tail_desc = tx_desc;
6933 dp_tx_desc_clear(tx_desc);
6936 head_desc = tx_desc;
6937 tail_desc = tx_desc;
6940 tail_desc->next = tx_desc;
6941 tx_desc->next = NULL;
6942 tail_desc = tx_desc;
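
Lines 6711-6942 are the completion-ring drain loop: after cookie/pdev/flag validation (6857-6910), descriptors are chained into one of two singly linked lists, a fast list for FASTPATH_SIMPLE/PPEDS descriptors (6923-6933) and a regular list (6936-6942), each built with a head/tail append. A sketch of that append:

#include <stddef.h>

struct tx_desc { struct tx_desc *next; };

/* head/tail append as at lines 6936-6942; the first node seeds both. */
static void append(struct tx_desc **head, struct tx_desc **tail,
                   struct tx_desc *d)
{
        d->next = NULL;
        if (!*head)
                *head = d;
        else
                (*tail)->next = d;
        *tail = d;
}

Collecting into local lists keeps the ring-drain loop itself tight and presumably defers the heavier per-descriptor processing until after the ring access ends.
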
7113 struct dp_tx_desc_s *tx_desc) argument
7115 if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
7118 if (tx_desc->flags & DP_TX_DESC_FLAG_PPEDS)
7125 return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
7126 (tx_desc->pdev == pdev);
7132 struct dp_tx_desc_s *tx_desc) argument
7134 if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
7142 return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
7143 (tx_desc->pdev == pdev);
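
dp_is_tx_desc_flush_match() (7113-7143, two build variants) decides whether a descriptor belongs to the vdev or pdev being flushed: the descriptor must carry DP_TX_DESC_FLAG_ALLOCATED (7115, 7134), one variant additionally skips PPE-DS descriptors (7118), and the match is by vdev_id when a vdev is given, else by pdev (7125-7126, 7142-7143). Sketch:

#include <stdbool.h>

#define FLAG_ALLOCATED (1u << 6)   /* illustrative bit */

struct pdev;
struct vdev { int vdev_id; };
struct tx_desc { unsigned int flags; int vdev_id; struct pdev *pdev; };

static bool flush_match(struct pdev *p, struct vdev *v, struct tx_desc *d)
{
        if (!(d && (d->flags & FLAG_ALLOCATED)))
                return false;              /* line 7115 */
        return v ? (d->vdev_id == v->vdev_id) : (d->pdev == p);
}
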
7156 struct dp_tx_desc_s *tx_desc = NULL; local
7165 tx_desc_pool = &soc->tx_desc[i];
7193 tx_desc = dp_tx_desc_find(soc, i, page_id, offset,
7196 if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
7203 tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
7204 dp_tx_comp_free_buf(soc, tx_desc,
7206 dp_tx_desc_release(soc, tx_desc, i);
7208 tx_desc->vdev_id = DP_INVALID_VDEV_ID;
7226 dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc, argument
7235 tx_desc->vdev_id = DP_INVALID_VDEV_ID;
7248 struct dp_tx_desc_s *tx_desc = NULL; local
7271 tx_desc = dp_tx_desc_find(soc, i, page_id, offset,
7274 if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
7276 dp_tx_comp_free_buf(soc, tx_desc,
7278 dp_tx_desc_release(soc, tx_desc, i);
7280 dp_tx_desc_reset_vdev(soc, tx_desc,
7314 qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
7315 soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
7352 qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);