/*
 * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*=== header file includes ===*/
/* generic utilities */
#include <qdf_nbuf.h>		/* qdf_nbuf_t, etc. */
#include <qdf_timer.h>
#include <qdf_time.h>

/* datapath internal interfaces */
#include <ol_txrx_internal.h>	/* TXRX_ASSERT, etc. */
#include <ol_rx_reorder.h>	/* ol_rx_reorder_flush, etc. */
#include <ol_rx_reorder_timeout.h>

#ifdef QCA_SUPPORT_OL_RX_REORDER_TIMEOUT

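/**
 * ol_rx_reorder_timeout_remove() - unlink a peer-TID's timeout element from
 *	its access category's virtual timer list
 * @peer: the peer whose rx reorder state is being updated
 * @tid: the traffic ID within the peer
 *
 * If the element is not currently active (i.e. already removed), do nothing.
 */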
void ol_rx_reorder_timeout_remove(struct ol_txrx_peer_t *peer, unsigned int tid)
{
	struct ol_txrx_pdev_t *pdev;
	struct ol_tx_reorder_cat_timeout_t *rx_reorder_timeout_ac;
	struct ol_rx_reorder_timeout_list_elem_t *list_elem;
	int ac;

	pdev = peer->vdev->pdev;
	ac = TXRX_TID_TO_WMM_AC(tid);
	rx_reorder_timeout_ac = &pdev->rx.reorder_timeout.access_cats[ac];
	list_elem = &peer->tids_rx_reorder[tid].timeout;
	if (!list_elem->active) {
		/* this element has already been removed */
		return;
	}
	list_elem->active = 0;
	TAILQ_REMOVE(&rx_reorder_timeout_ac->virtual_timer_list, list_elem,
		     reorder_timeout_list_elem);
}

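/**
 * ol_rx_reorder_timeout_start() - arm the access category's timer to fire
 *	when the head element of the virtual timer list expires
 * @rx_reorder_timeout_ac: per-access-category reorder timeout state
 * @time_now_ms: current time in ms, used to compute the remaining duration
 */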
static void
ol_rx_reorder_timeout_start(struct ol_tx_reorder_cat_timeout_t
			    *rx_reorder_timeout_ac, uint32_t time_now_ms)
{
	uint32_t duration_ms;
	struct ol_rx_reorder_timeout_list_elem_t *list_elem;

	list_elem = TAILQ_FIRST(&rx_reorder_timeout_ac->virtual_timer_list);

	duration_ms = list_elem->timestamp_ms - time_now_ms;
	qdf_timer_start(&rx_reorder_timeout_ac->timer, duration_ms);
}

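/**
 * ol_rx_reorder_timeout_add() - activate the peer-TID's timeout element,
 *	stamp its expiry (now + the AC's timeout duration), and append it to
 *	the AC's virtual timer list
 * @peer: the peer whose rx reorder state is being updated
 * @tid: the traffic ID within the peer
 *
 * If the virtual timer list was empty, the underlying timer is started too.
 */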
static inline void
ol_rx_reorder_timeout_add(struct ol_txrx_peer_t *peer, uint8_t tid)
{
	uint32_t time_now_ms;
	struct ol_txrx_pdev_t *pdev;
	struct ol_tx_reorder_cat_timeout_t *rx_reorder_timeout_ac;
	struct ol_rx_reorder_timeout_list_elem_t *list_elem;
	int ac;
	int start;

	pdev = peer->vdev->pdev;
	ac = TXRX_TID_TO_WMM_AC(tid);
	rx_reorder_timeout_ac = &pdev->rx.reorder_timeout.access_cats[ac];
	list_elem = &peer->tids_rx_reorder[tid].timeout;

	list_elem->active = 1;
	list_elem->peer = peer;
	list_elem->tid = tid;

	/* set the expiration timestamp */
	time_now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());
	list_elem->timestamp_ms =
		time_now_ms + rx_reorder_timeout_ac->duration_ms;

	/* add to the queue */
	start = TAILQ_EMPTY(&rx_reorder_timeout_ac->virtual_timer_list);
	TAILQ_INSERT_TAIL(&rx_reorder_timeout_ac->virtual_timer_list,
			  list_elem, reorder_timeout_list_elem);
	if (start)
		ol_rx_reorder_timeout_start(rx_reorder_timeout_ac, time_now_ms);
}

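/**
 * ol_rx_reorder_timeout_update() - (re)arm the reorder timeout for a peer-TID
 * @peer: the peer whose rx reorder state is being updated
 * @tid: the traffic ID within the peer
 *
 * Does nothing if the peer is NULL, if no MPDUs are queued in the reorder
 * array (nothing to time out), or if the virtual timer for this peer-TID is
 * already running.
 */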
void ol_rx_reorder_timeout_update(struct ol_txrx_peer_t *peer, uint8_t tid)
{
	if (!peer)
		return;

	/*
	 * If there are no holes, i.e. no queued frames,
	 * then timeout doesn't apply.
	 */
	if (peer->tids_rx_reorder[tid].num_mpdus == 0)
		return;

	/*
	 * If the virtual timer for this peer-TID is already running,
	 * then leave it.
	 */
	if (peer->tids_rx_reorder[tid].timeout.active)
		return;

	ol_rx_reorder_timeout_add(peer, tid);
}

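/**
 * ol_rx_reorder_timeout() - per-access-category timer callback
 * @arg: the AC's struct ol_tx_reorder_cat_timeout_t
 *
 * Walk the AC's virtual timer list under the pdev rx mutex; for each expired
 * element, remove it from the list and flush the peer-TID's reorder queue
 * from the next release index up to the first hole, releasing the buffered
 * frames.  Restart the timer if any unexpired elements remain.
 */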
static void ol_rx_reorder_timeout(void *arg)
{
	struct ol_txrx_pdev_t *pdev;
	struct ol_rx_reorder_timeout_list_elem_t *list_elem, *tmp;
	uint32_t time_now_ms;
	struct ol_tx_reorder_cat_timeout_t *rx_reorder_timeout_ac;

	rx_reorder_timeout_ac = (struct ol_tx_reorder_cat_timeout_t *)arg;
	time_now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());

	pdev = rx_reorder_timeout_ac->pdev;
	qdf_spin_lock(&pdev->rx.mutex);
	/* TODO: conditionally take mutex lock during regular rx */
	TAILQ_FOREACH_SAFE(list_elem,
			   &rx_reorder_timeout_ac->virtual_timer_list,
			   reorder_timeout_list_elem, tmp) {
		unsigned int idx_start, idx_end;
		struct ol_txrx_peer_t *peer;

		if (list_elem->timestamp_ms > time_now_ms)
			break;	/* time has not expired yet for this element */

		list_elem->active = 0;
		/* remove the expired element from the list */
		TAILQ_REMOVE(&rx_reorder_timeout_ac->virtual_timer_list,
			     list_elem, reorder_timeout_list_elem);

		peer = list_elem->peer;

		idx_start = 0xffff;	/* start from next_rel_idx */
		ol_rx_reorder_first_hole(peer, list_elem->tid, &idx_end);
		ol_rx_reorder_flush(peer->vdev,
				    peer,
				    list_elem->tid,
				    idx_start, idx_end, htt_rx_flush_release);
	}
	/* restart the timer if unexpired elements are left in the list */
	if (!TAILQ_EMPTY(&rx_reorder_timeout_ac->virtual_timer_list))
		ol_rx_reorder_timeout_start(rx_reorder_timeout_ac, time_now_ms);

	qdf_spin_unlock(&pdev->rx.mutex);
}

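/**
 * ol_rx_reorder_timeout_init() - set up the per-AC reorder timeout state
 * @pdev: the physical device being initialized
 *
 * Initializes each access category's timer and virtual timer list, and sets
 * the per-AC timeout durations (40 ms for voice, 100 ms otherwise).
 */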
void ol_rx_reorder_timeout_init(struct ol_txrx_pdev_t *pdev)
{
	int i;

	for (i = 0; i < QDF_ARRAY_SIZE(pdev->rx.reorder_timeout.access_cats);
	     i++) {
		struct ol_tx_reorder_cat_timeout_t *rx_reorder_timeout_ac;

		rx_reorder_timeout_ac =
			&pdev->rx.reorder_timeout.access_cats[i];
		/* init the per-AC timers */
		qdf_timer_init(pdev->osdev,
			       &rx_reorder_timeout_ac->timer,
			       ol_rx_reorder_timeout,
			       rx_reorder_timeout_ac,
			       QDF_TIMER_TYPE_SW);
		/* init the virtual timer list */
		TAILQ_INIT(&rx_reorder_timeout_ac->virtual_timer_list);
		rx_reorder_timeout_ac->pdev = pdev;
	}
	pdev->rx.reorder_timeout.access_cats[TXRX_WMM_AC_VO].duration_ms = 40;
	pdev->rx.reorder_timeout.access_cats[TXRX_WMM_AC_VI].duration_ms = 100;
	pdev->rx.reorder_timeout.access_cats[TXRX_WMM_AC_BE].duration_ms = 100;
	pdev->rx.reorder_timeout.access_cats[TXRX_WMM_AC_BK].duration_ms = 100;
}

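/**
 * ol_rx_reorder_timeout_peer_cleanup() - cancel any pending reorder timeouts
 *	for all of a peer's TIDs
 * @peer: the peer being cleaned up
 */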
void ol_rx_reorder_timeout_peer_cleanup(struct ol_txrx_peer_t *peer)
{
	int tid;

	for (tid = 0; tid < OL_TXRX_NUM_EXT_TIDS; tid++) {
		if (peer->tids_rx_reorder[tid].timeout.active)
			ol_rx_reorder_timeout_remove(peer, tid);
	}
}

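/**
 * ol_rx_reorder_timeout_cleanup() - stop and free each access category's
 *	reorder timeout timer
 * @pdev: the physical device being detached
 */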
void ol_rx_reorder_timeout_cleanup(struct ol_txrx_pdev_t *pdev)
{
	int i;

	for (i = 0; i < QDF_ARRAY_SIZE(pdev->rx.reorder_timeout.access_cats);
	     i++) {
		struct ol_tx_reorder_cat_timeout_t *rx_reorder_timeout_ac;

		rx_reorder_timeout_ac =
			&pdev->rx.reorder_timeout.access_cats[i];
		qdf_timer_stop(&rx_reorder_timeout_ac->timer);
		qdf_timer_free(&rx_reorder_timeout_ac->timer);
	}
}

#endif /* QCA_SUPPORT_OL_RX_REORDER_TIMEOUT */