/*
 * Copyright (c) 2015-2017, 2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: qdf_lro.c
 * QCA driver framework (QDF) Large Receive Offload
 */
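
/*
 * Typical call flow (sketch): qdf_lro_init() creates an LRO context,
 * qdf_lro_get_info() checks a packet's LRO eligibility and looks up its
 * flow descriptor, qdf_lro_flush()/qdf_lro_flush_pkt() hand aggregated
 * packets up to the network stack, and qdf_lro_deinit() frees the context.
 */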

#include <qdf_lro.h>
#include <qdf_trace.h>
#include <qdf_types.h>

#include <linux/list.h>
#include <net/tcp.h>

/**
 * qdf_lro_desc_pool_init() - Initialize the free pool of LRO
 * descriptors
 * @lro_desc_pool: free pool of the LRO descriptors
 * @lro_mgr: LRO manager
 *
 * Initialize a list that holds the free LRO descriptors
 *
 * Return: none
 */
static void qdf_lro_desc_pool_init(struct qdf_lro_desc_pool *lro_desc_pool,
                                   struct net_lro_mgr *lro_mgr)
{
        int i;

        INIT_LIST_HEAD(&lro_desc_pool->lro_free_list_head);

        for (i = 0; i < QDF_LRO_DESC_POOL_SZ; i++) {
                lro_desc_pool->lro_desc_array[i].lro_desc =
                        &lro_mgr->lro_arr[i];
                list_add_tail(&lro_desc_pool->lro_desc_array[i].lro_node,
                              &lro_desc_pool->lro_free_list_head);
        }
}

/**
 * qdf_lro_desc_info_init() - Initialize the LRO descriptors
 * @qdf_info: QDF LRO data structure
 *
 * Initialize the free pool of LRO descriptors and the entries
 * of the hash table
 *
 * Return: none
 */
static void qdf_lro_desc_info_init(struct qdf_lro_s *qdf_info)
{
        int i;

        /* Initialize pool of free LRO desc. */
        qdf_lro_desc_pool_init(&qdf_info->lro_desc_info.lro_desc_pool,
                               qdf_info->lro_mgr);

        /* Initialize the hash table of LRO desc. */
        for (i = 0; i < QDF_LRO_DESC_TABLE_SZ; i++) {
                /* initialize the flows in the hash table */
                INIT_LIST_HEAD(&qdf_info->lro_desc_info.
                               lro_hash_table[i].lro_desc_list);
        }
}

/**
 * qdf_lro_get_skb_header() - LRO callback function
 * @skb: network buffer
 * @ip_hdr: contains a pointer to the IP header
 * @tcpudp_hdr: contains a pointer to the TCP header
 * @hdr_flags: flags indicating that this is a TCP/IPv4 frame
 * @priv: private driver-specific opaque pointer
 *
 * Get the IP and TCP headers from the skb
 *
 * Return: 0 - success, < 0 - failure
 */
static int qdf_lro_get_skb_header(struct sk_buff *skb, void **ip_hdr,
                                  void **tcpudp_hdr, u64 *hdr_flags,
                                  void *priv)
{
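        /* This path aggregates only IPv4/TCP flows; reject IPv6 frames */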
        if (QDF_NBUF_CB_RX_IPV6_PROTO(skb)) {
                *hdr_flags = 0;
                return -EINVAL;
        }

        *hdr_flags |= (LRO_IPV4 | LRO_TCP);
        (*ip_hdr) = skb->data;
        (*tcpudp_hdr) = skb->data + QDF_NBUF_CB_RX_TCP_OFFSET(skb);
        return 0;
}

qdf_lro_ctx_t qdf_lro_init(void)
{
        struct qdf_lro_s *lro_ctx;
        size_t lro_info_sz, lro_mgr_sz, desc_arr_sz, desc_pool_sz;
        size_t hash_table_sz;
        uint8_t *lro_mem_ptr;

        /*
         * Allocate all the LRO data structures at once and then carve
         * them up as needed
         */
        lro_info_sz = sizeof(struct qdf_lro_s);
        lro_mgr_sz = sizeof(struct net_lro_mgr);
        desc_arr_sz =
                (QDF_LRO_DESC_POOL_SZ * sizeof(struct net_lro_desc));
        desc_pool_sz =
                (QDF_LRO_DESC_POOL_SZ * sizeof(struct qdf_lro_desc_entry));
        hash_table_sz =
                (sizeof(struct qdf_lro_desc_table) * QDF_LRO_DESC_TABLE_SZ);

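        /*
         * Layout of the single allocation, carved up below:
         * [qdf_lro_s][net_lro_mgr][lro_arr][lro_desc_array][lro_hash_table]
         */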
        lro_mem_ptr = qdf_mem_malloc(lro_info_sz + lro_mgr_sz + desc_arr_sz +
                                     desc_pool_sz + hash_table_sz);

        if (unlikely(!lro_mem_ptr))
                return NULL;

        lro_ctx = (struct qdf_lro_s *)lro_mem_ptr;
        lro_mem_ptr += lro_info_sz;
        /* LRO manager */
        lro_ctx->lro_mgr = (struct net_lro_mgr *)lro_mem_ptr;
        lro_mem_ptr += lro_mgr_sz;

        /* LRO descriptor array */
        lro_ctx->lro_mgr->lro_arr = (struct net_lro_desc *)lro_mem_ptr;
        lro_mem_ptr += desc_arr_sz;

        /* LRO descriptor pool */
        lro_ctx->lro_desc_info.lro_desc_pool.lro_desc_array =
                (struct qdf_lro_desc_entry *)lro_mem_ptr;
        lro_mem_ptr += desc_pool_sz;

        /* hash table to store the LRO descriptors */
        lro_ctx->lro_desc_info.lro_hash_table =
                (struct qdf_lro_desc_table *)lro_mem_ptr;

        /* Initialize the LRO descriptors */
        qdf_lro_desc_info_init(lro_ctx);

        /* LRO TODO - NAPI or RX thread */
        lro_ctx->lro_mgr->features |= LRO_F_NAPI;

        lro_ctx->lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
        lro_ctx->lro_mgr->max_aggr = QDF_LRO_MAX_AGGR_SIZE;
        lro_ctx->lro_mgr->get_skb_header = qdf_lro_get_skb_header;
        lro_ctx->lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
        lro_ctx->lro_mgr->max_desc = QDF_LRO_DESC_POOL_SZ;

        return lro_ctx;
}

void qdf_lro_deinit(qdf_lro_ctx_t lro_ctx)
{
        if (likely(lro_ctx)) {
                QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
                          "LRO instance %pK is being freed", lro_ctx);
                qdf_mem_free(lro_ctx);
        }
}

/**
 * qdf_lro_tcp_flow_match() - function to check for a flow match
 * @lro_desc: LRO descriptor
 * @iph: IP header
 * @tcph: TCP header
 *
 * Checks if the descriptor belongs to the same flow as the one
 * indicated by the TCP and IP headers.
 *
 * Return: true - flow match, false - flow does not match
 */
static inline bool qdf_lro_tcp_flow_match(struct net_lro_desc *lro_desc,
                                          struct iphdr *iph,
                                          struct tcphdr *tcph)
{
        if ((lro_desc->tcph->source != tcph->source) ||
            (lro_desc->tcph->dest != tcph->dest) ||
            (lro_desc->iph->saddr != iph->saddr) ||
            (lro_desc->iph->daddr != iph->daddr))
                return false;

        return true;
}

/**
 * qdf_lro_desc_find() - LRO descriptor look-up function
 * @lro_ctx: LRO context
 * @skb: network buffer
 * @iph: IP header
 * @tcph: TCP header
 * @flow_hash: toeplitz hash
 * @lro_desc: LRO descriptor to be returned
 *
 * Look-up the LRO descriptor in the hash table based on the
 * flow ID toeplitz. If the flow is not found, allocates a new
 * LRO descriptor and places it in the hash table
 *
 * Return: 0 - success, < 0 - failure
 */
static int qdf_lro_desc_find(struct qdf_lro_s *lro_ctx,
                             struct sk_buff *skb, struct iphdr *iph,
                             struct tcphdr *tcph, uint32_t flow_hash,
                             struct net_lro_desc **lro_desc)
{
        uint32_t i;
        struct qdf_lro_desc_table *lro_hash_table;
        struct list_head *ptr;
        struct qdf_lro_desc_entry *entry;
        struct qdf_lro_desc_pool *free_pool;
        struct qdf_lro_desc_info *desc_info = &lro_ctx->lro_desc_info;

        *lro_desc = NULL;
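
        /* Hash bucket index; assumes QDF_LRO_DESC_TABLE_SZ is a power of
         * two, so the _MASK companion selects the low-order bits
         */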
        i = flow_hash & QDF_LRO_DESC_TABLE_SZ_MASK;

        lro_hash_table = &desc_info->lro_hash_table[i];

        if (unlikely(!lro_hash_table)) {
                QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
                          "Invalid hash entry");
                QDF_ASSERT(0);
                return -EINVAL;
        }

        /* Check if this flow exists in the descriptor list */
        list_for_each(ptr, &lro_hash_table->lro_desc_list) {
                entry = list_entry(ptr, struct qdf_lro_desc_entry, lro_node);
                if (qdf_lro_tcp_flow_match(entry->lro_desc, iph, tcph)) {
                        *lro_desc = entry->lro_desc;
                        return 0;
                }
        }

        /* no existing flow found, a new LRO desc needs to be allocated */
        free_pool = &lro_ctx->lro_desc_info.lro_desc_pool;
        entry = list_first_entry_or_null(&free_pool->lro_free_list_head,
                                         struct qdf_lro_desc_entry, lro_node);
        if (unlikely(!entry)) {
                QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
                          "Could not allocate LRO desc!");
                return -ENOMEM;
        }

        list_del_init(&entry->lro_node);

        if (unlikely(!entry->lro_desc)) {
                QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
                          "entry->lro_desc is NULL!");
                return -EINVAL;
        }

        memset(entry->lro_desc, 0, sizeof(struct net_lro_desc));

        /*
         * lro_desc->active should be 0 and lro_desc->tcp_rcv_tsval
         * should be 0 for newly allocated lro descriptors
         */
        list_add_tail(&entry->lro_node,
                      &lro_hash_table->lro_desc_list);

        *lro_desc = entry->lro_desc;
        return 0;
}

/**
 * qdf_lro_get_info() - Update the LRO information
 * @lro_ctx: LRO context
 * @nbuf: network buffer
 * @info: LRO related information passed in by the caller
 * @plro_desc: LRO descriptor returned as output
 *
 * Look-up the LRO descriptor based on the LRO information and
 * the network buffer provided. Update the skb cb with the
 * descriptor found
 *
 * Return: true - LRO eligible, false - LRO ineligible
 */
bool qdf_lro_get_info(qdf_lro_ctx_t lro_ctx, qdf_nbuf_t nbuf,
                      struct qdf_lro_info *info,
                      void **plro_desc)
{
        struct net_lro_desc *lro_desc;
        struct iphdr *iph;
        struct tcphdr *tcph;
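        /* HW must flag the frame LRO-eligible and it must not be a pure
         * TCP ACK, which carries no payload to aggregate
         */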
        int hw_lro_eligible =
                QDF_NBUF_CB_RX_LRO_ELIGIBLE(nbuf) &&
                (!QDF_NBUF_CB_RX_TCP_PURE_ACK(nbuf));

        if (unlikely(!lro_ctx)) {
                QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
                          "Invalid LRO context");
                return false;
        }

        if (!hw_lro_eligible)
                return false;

        iph = (struct iphdr *)info->iph;
        tcph = (struct tcphdr *)info->tcph;
        if (qdf_lro_desc_find(lro_ctx, nbuf, iph, tcph,
                              QDF_NBUF_CB_RX_FLOW_ID(nbuf),
                              (struct net_lro_desc **)plro_desc) != 0) {
                QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
                          "finding the LRO desc failed");
                return false;
        }

        lro_desc = (struct net_lro_desc *)(*plro_desc);
        if (unlikely(!lro_desc)) {
                QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
                          "NULL LRO descriptor returned");
                return false;
        }

        /* if this is not the first skb, check the timestamp option */
        if (lro_desc->tcp_rcv_tsval) {
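                /*
                 * doff == 8 means a 32-byte TCP header: 20 fixed bytes
                 * plus the 12-byte NOP/NOP/timestamp option block
                 * validated below
                 */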
                if (tcph->doff == 8) {
                        __be32 *topt = (__be32 *)(tcph + 1);

                        if (*topt != htonl((TCPOPT_NOP << 24)
                                           | (TCPOPT_NOP << 16)
                                           | (TCPOPT_TIMESTAMP << 8)
                                           | TCPOLEN_TIMESTAMP))
                                return true;

                        /* timestamp should be in right order */
                        topt++;
                        if (after(ntohl(lro_desc->tcp_rcv_tsval),
                                  ntohl(*topt)))
                                return false;

                        /* timestamp reply should not be zero */
                        topt++;
                        if (*topt == 0)
                                return false;
                }
        }

        return true;
}

void qdf_lro_desc_free(qdf_lro_ctx_t lro_ctx, void *data)
{
        struct qdf_lro_desc_entry *entry;
        struct net_lro_mgr *lro_mgr;
        struct net_lro_desc *arr_base;
        struct qdf_lro_desc_info *desc_info;
        int i;
        struct net_lro_desc *desc = (struct net_lro_desc *)data;

        qdf_assert(desc);
        qdf_assert(lro_ctx);

        if (unlikely(!desc || !lro_ctx)) {
                QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
                          "invalid input");
                return;
        }

        lro_mgr = lro_ctx->lro_mgr;
        arr_base = lro_mgr->lro_arr;
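
        /*
         * Pointer arithmetic recovers the descriptor's index in lro_arr;
         * pool init mapped lro_desc_array[i] to lro_arr[i] one-to-one
         */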
        i = desc - arr_base;

        if (unlikely(i >= QDF_LRO_DESC_POOL_SZ)) {
                QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
                          "invalid index %d", i);
                return;
        }

        desc_info = &lro_ctx->lro_desc_info;
        entry = &desc_info->lro_desc_pool.lro_desc_array[i];

        list_del_init(&entry->lro_node);

        list_add_tail(&entry->lro_node,
                      &desc_info->lro_desc_pool.lro_free_list_head);
}

void qdf_lro_flush(qdf_lro_ctx_t lro_ctx)
{
        struct net_lro_mgr *lro_mgr = lro_ctx->lro_mgr;
        int i;

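        /*
         * Return every active descriptor to the free pool, then hand its
         * aggregated packet to the network stack via lro_flush_desc()
         */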
        for (i = 0; i < lro_mgr->max_desc; i++) {
                if (lro_mgr->lro_arr[i].active) {
                        qdf_lro_desc_free(lro_ctx, &lro_mgr->lro_arr[i]);
                        lro_flush_desc(lro_mgr, &lro_mgr->lro_arr[i]);
                }
        }
}

/**
 * qdf_lro_get_desc() - LRO descriptor look-up function
 * @lro_mgr: LRO manager
 * @lro_arr: Array of LRO descriptors
 * @iph: IP header
 * @tcph: TCP header
 *
 * Looks up the LRO descriptor for a given flow
 *
 * Return: LRO descriptor
 */
static struct net_lro_desc *qdf_lro_get_desc(struct net_lro_mgr *lro_mgr,
                                             struct net_lro_desc *lro_arr,
                                             struct iphdr *iph,
                                             struct tcphdr *tcph)
{
        int i;

        for (i = 0; i < lro_mgr->max_desc; i++) {
                if (lro_arr[i].active &&
                    qdf_lro_tcp_flow_match(&lro_arr[i], iph, tcph))
                        return &lro_arr[i];
        }

        return NULL;
}

void qdf_lro_flush_pkt(qdf_lro_ctx_t lro_ctx,
                       struct qdf_lro_info *info)
{
        struct net_lro_desc *lro_desc;
        struct net_lro_mgr *lro_mgr = lro_ctx->lro_mgr;
        struct iphdr *iph = (struct iphdr *)info->iph;
        struct tcphdr *tcph = (struct tcphdr *)info->tcph;

        lro_desc = qdf_lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);

        if (lro_desc) {
                /* statistics */
                qdf_lro_desc_free(lro_ctx, lro_desc);
                lro_flush_desc(lro_mgr, lro_desc);
        }
}