/*
 * Definitions for the 'struct skb_array' data structure.
 *
 * Author:
 *	Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (C) 2016 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Limited-size FIFO of skbs. Can be used more or less whenever
 * sk_buff_head can be used, except you need to know the queue size in
 * advance.
 * Implemented as a type-safe wrapper around ptr_ring.
 */

#ifndef _LINUX_SKB_ARRAY_H
#define _LINUX_SKB_ARRAY_H 1

#ifdef __KERNEL__
#include <linux/ptr_ring.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#endif

struct skb_array {
	struct ptr_ring ring;
};
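
/*
 * A minimal usage sketch (illustrative only; the queue name, size and
 * error handling are assumptions, not taken from any in-tree user):
 *
 *	struct skb_array q;
 *
 *	if (skb_array_init(&q, 128, GFP_KERNEL))
 *		return -ENOMEM;
 *	if (skb_array_produce(&q, skb))
 *		kfree_skb(skb);			(nonzero: ring full)
 *	skb = skb_array_consume(&q);		(NULL when empty)
 *	...
 *	skb_array_cleanup(&q);			(frees any queued skbs)
 */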

/* Might be slightly faster than skb_array_full below, but callers invoking
 * this in a loop must use a compiler barrier, for example cpu_relax().
 */
static inline bool __skb_array_full(struct skb_array *a)
{
	return __ptr_ring_full(&a->ring);
}
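
/*
 * A sketch of the polling loop the comment above has in mind: without a
 * barrier such as cpu_relax(), the compiler may hoist the ring-state
 * read out of the loop. Illustrative only:
 *
 *	while (__skb_array_full(a))
 *		cpu_relax();
 */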

static inline bool skb_array_full(struct skb_array *a)
{
	return ptr_ring_full(&a->ring);
}

static inline int skb_array_produce(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce(&a->ring, skb);
}

static inline int skb_array_produce_irq(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_irq(&a->ring, skb);
}

static inline int skb_array_produce_bh(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_bh(&a->ring, skb);
}

static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_any(&a->ring, skb);
}
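
/*
 * The _irq, _bh and _any produce variants take the producer lock with
 * the matching spinlock flavor (spin_lock_irq(), spin_lock_bh(),
 * spin_lock_irqsave()); pick the one that matches the contexts your
 * producers run in. A hedged sketch for a BH-context producer (the
 * drop-on-full handling is an assumption, not a rule):
 *
 *	if (skb_array_produce_bh(&q, skb))
 *		kfree_skb(skb);			(ring full, drop)
 */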

/* Might be slightly faster than skb_array_empty below, but only safe if the
 * array is never resized. Also, callers invoking this in a loop must take care
 * to use a compiler barrier, for example cpu_relax().
 */
static inline bool __skb_array_empty(struct skb_array *a)
{
	return __ptr_ring_empty(&a->ring);
}

static inline struct sk_buff *__skb_array_peek(struct skb_array *a)
{
	return __ptr_ring_peek(&a->ring);
}

static inline bool skb_array_empty(struct skb_array *a)
{
	return ptr_ring_empty(&a->ring);
}

static inline bool skb_array_empty_bh(struct skb_array *a)
{
	return ptr_ring_empty_bh(&a->ring);
}

static inline bool skb_array_empty_irq(struct skb_array *a)
{
	return ptr_ring_empty_irq(&a->ring);
}

static inline bool skb_array_empty_any(struct skb_array *a)
{
	return ptr_ring_empty_any(&a->ring);
}

static inline struct sk_buff *__skb_array_consume(struct skb_array *a)
{
	return __ptr_ring_consume(&a->ring);
}

static inline struct sk_buff *skb_array_consume(struct skb_array *a)
{
	return ptr_ring_consume(&a->ring);
}

static inline int skb_array_consume_batched(struct skb_array *a,
					    struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched(&a->ring, (void **)array, n);
}
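
/*
 * Batched consume takes the consumer lock once for up to n skbs and
 * returns how many were actually dequeued. A hedged sketch; the batch
 * size and the process_skb() helper are hypothetical:
 *
 *	struct sk_buff *batch[16];
 *	int i, n;
 *
 *	n = skb_array_consume_batched(&q, batch, ARRAY_SIZE(batch));
 *	for (i = 0; i < n; i++)
 *		process_skb(batch[i]);
 */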

static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a)
{
	return ptr_ring_consume_irq(&a->ring);
}

static inline int skb_array_consume_batched_irq(struct skb_array *a,
						struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n);
}

static inline struct sk_buff *skb_array_consume_any(struct skb_array *a)
{
	return ptr_ring_consume_any(&a->ring);
}

static inline int skb_array_consume_batched_any(struct skb_array *a,
						struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_any(&a->ring, (void **)array, n);
}

static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a)
{
	return ptr_ring_consume_bh(&a->ring);
}

static inline int skb_array_consume_batched_bh(struct skb_array *a,
					       struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n);
}

/* Length of the next skb for accounting purposes: skb->len, plus 4 bytes
 * (VLAN_HLEN) if a VLAN tag is present out-of-band and not yet inserted
 * into the packet data; 0 if there is no skb.
 */
static inline int __skb_array_len_with_tag(struct sk_buff *skb)
{
	if (likely(skb)) {
		int len = skb->len;

		if (skb_vlan_tag_present(skb))
			len += VLAN_HLEN;

		return len;
	} else {
		return 0;
	}
}

static inline int skb_array_peek_len(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_irq(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_IRQ(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_bh(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_BH(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_any(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag);
}
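
/*
 * The peek_len variants report the length of the next skb without
 * dequeuing it (0 when empty), which lets a single consumer enforce a
 * byte budget. A hedged sketch; "budget" and xmit() are assumptions:
 *
 *	int len;
 *
 *	while ((len = skb_array_peek_len(&q)) > 0 && len <= budget) {
 *		struct sk_buff *skb = skb_array_consume(&q);
 *
 *		if (!skb)
 *			break;
 *		budget -= len;
 *		xmit(skb);
 *	}
 */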

static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp)
{
	return ptr_ring_init(&a->ring, size, gfp);
}

static void __skb_array_destroy_skb(void *ptr)
{
	kfree_skb(ptr);
}

static inline void skb_array_unconsume(struct skb_array *a,
				       struct sk_buff **skbs, int n)
{
	ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb);
}

static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
{
	return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
}
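
/*
 * Resize is safe against concurrent produce/consume: the ring's locks
 * are taken internally, and any skbs that no longer fit in the new ring
 * are freed through __skb_array_destroy_skb. An illustrative call (the
 * new size is an assumption):
 *
 *	err = skb_array_resize(&q, 1024, GFP_KERNEL);
 */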

static inline int skb_array_resize_multiple(struct skb_array **rings,
					    int nrings, unsigned int size,
					    gfp_t gfp)
{
	/* The cast below relies on the ring being the first member of
	 * struct skb_array, so that a struct skb_array pointer is also a
	 * valid struct ptr_ring pointer.
	 */
	BUILD_BUG_ON(offsetof(struct skb_array, ring));
	return ptr_ring_resize_multiple((struct ptr_ring **)rings,
					nrings, size, gfp,
					__skb_array_destroy_skb);
}

static inline void skb_array_cleanup(struct skb_array *a)
{
	ptr_ring_cleanup(&a->ring, __skb_array_destroy_skb);
}

#endif /* _LINUX_SKB_ARRAY_H */