/*
 * include/linux/if_team.h - Network team device driver header
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#ifndef _LINUX_IF_TEAM_H_
#define _LINUX_IF_TEAM_H_

#include <linux/netpoll.h>
#include <net/sch_generic.h>
#include <linux/types.h>
#include <uapi/linux/if_team.h>

struct team_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			rx_multicast;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			rx_dropped;
	u32			tx_dropped;
	u32			rx_nohandler;
};
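
/*
 * A minimal sketch of the usual per-CPU update pattern for the counters
 * above (the local variable names here are illustrative only, not part of
 * this header):
 *
 *	struct team_pcpu_stats *pcpu_stats;
 *
 *	pcpu_stats = this_cpu_ptr(team->pcpu_stats);
 *	u64_stats_update_begin(&pcpu_stats->syncp);
 *	pcpu_stats->rx_packets++;
 *	pcpu_stats->rx_bytes += skb->len;
 *	u64_stats_update_end(&pcpu_stats->syncp);
 *
 * Readers take consistent snapshots of the u64 counters by wrapping their
 * copies in a u64_stats_fetch_begin()/u64_stats_fetch_retry() loop.
 */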

struct team;

struct team_port {
	struct net_device *dev;
	struct hlist_node hlist; /* node in enabled ports hash list */
	struct list_head list; /* node in ordinary list */
	struct team *team;
	int index; /* index of enabled port. If disabled, it's set to -1 */

	bool linkup; /* either state.linkup or user.linkup */

	struct {
		bool linkup;
		u32 speed;
		u8 duplex;
	} state;

	/* Values set by userspace */
	struct {
		bool linkup;
		bool linkup_enabled;
	} user;

	/* Custom gennetlink interface related flags */
	bool changed;
	bool removed;

	/*
	 * A place for storing original values of the device before it
	 * becomes a port.
	 */
	struct {
		unsigned char dev_addr[MAX_ADDR_LEN];
		unsigned int mtu;
	} orig;

#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll *np;
#endif

	s32 priority; /* lower number ~ higher priority */
	u16 queue_id;
	struct list_head qom_list; /* node in queue override mapping list */
	struct rcu_head	rcu;
	long mode_priv[0];
};

static inline struct team_port *team_port_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

static inline bool team_port_enabled(struct team_port *port)
{
	return port->index != -1;
}

static inline bool team_port_txable(struct team_port *port)
{
	return port->linkup && team_port_enabled(port);
}

static inline bool team_port_dev_txable(const struct net_device *port_dev)
{
	struct team_port *port;
	bool txable;

	rcu_read_lock();
	port = team_port_get_rcu(port_dev);
	txable = port ? team_port_txable(port) : false;
	rcu_read_unlock();

	return txable;
}
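
/*
 * team_port_dev_txable() allows code outside the team driver (for example a
 * switch driver offloading a LAG) to ask whether a slave netdevice is
 * currently usable for transmit; the RCU read lock taken above is the only
 * protection required. Illustrative sketch of a hypothetical caller:
 *
 *	if (netif_is_team_port(slave_dev) && team_port_dev_txable(slave_dev))
 *		... include slave_dev when distributing egress traffic ...
 */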

#ifdef CONFIG_NET_POLL_CONTROLLER
static inline void team_netpoll_send_skb(struct team_port *port,
					 struct sk_buff *skb)
{
	struct netpoll *np = port->np;

	if (np)
		netpoll_send_skb(np, skb);
}
#else
static inline void team_netpoll_send_skb(struct team_port *port,
					 struct sk_buff *skb)
{
}
#endif

struct team_mode_ops {
	int (*init)(struct team *team);
	void (*exit)(struct team *team);
	rx_handler_result_t (*receive)(struct team *team,
				       struct team_port *port,
				       struct sk_buff *skb);
	bool (*transmit)(struct team *team, struct sk_buff *skb);
	int (*port_enter)(struct team *team, struct team_port *port);
	void (*port_leave)(struct team *team, struct team_port *port);
	void (*port_change_dev_addr)(struct team *team, struct team_port *port);
	void (*port_enabled)(struct team *team, struct team_port *port);
	void (*port_disabled)(struct team *team, struct team_port *port);
};
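
/*
 * team_mode_ops is the callback table a mode module provides: init()/exit()
 * run when the mode is attached to or detached from a team instance,
 * receive() and transmit() sit in the data path, and the port_* hooks track
 * ports entering, leaving, being enabled or disabled, or changing their
 * device address.
 */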

extern int team_modeop_port_enter(struct team *team, struct team_port *port);
extern void team_modeop_port_change_dev_addr(struct team *team,
					     struct team_port *port);

enum team_option_type {
	TEAM_OPTION_TYPE_U32,
	TEAM_OPTION_TYPE_STRING,
	TEAM_OPTION_TYPE_BINARY,
	TEAM_OPTION_TYPE_BOOL,
	TEAM_OPTION_TYPE_S32,
};

struct team_option_inst_info {
	u32 array_index;
	struct team_port *port; /* != NULL if per-port */
};

struct team_gsetter_ctx {
	union {
		u32 u32_val;
		const char *str_val;
		struct {
			const void *ptr;
			u32 len;
		} bin_val;
		bool bool_val;
		s32 s32_val;
	} data;
	struct team_option_inst_info *info;
};

struct team_option {
	struct list_head list;
	const char *name;
	bool per_port;
	unsigned int array_size; /* != 0 means the option is an array */
	enum team_option_type type;
	int (*init)(struct team *team, struct team_option_inst_info *info);
	int (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
	int (*setter)(struct team *team, struct team_gsetter_ctx *ctx);
};
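
/*
 * Options are typically registered from a mode's init() callback via
 * team_options_register() (declared below). A minimal, illustrative sketch;
 * the my_opt_* names and helpers are hypothetical, not part of this header:
 *
 *	static int my_opt_get(struct team *team, struct team_gsetter_ctx *ctx)
 *	{
 *		ctx->data.u32_val = my_read_mode_state(team);
 *		return 0;
 *	}
 *
 *	static int my_opt_set(struct team *team, struct team_gsetter_ctx *ctx)
 *	{
 *		return my_apply_mode_state(team, ctx->data.u32_val);
 *	}
 *
 *	static const struct team_option my_options[] = {
 *		{
 *			.name	= "my_option",
 *			.type	= TEAM_OPTION_TYPE_U32,
 *			.getter	= my_opt_get,
 *			.setter	= my_opt_set,
 *		},
 *	};
 *
 *	err = team_options_register(team, my_options, ARRAY_SIZE(my_options));
 */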

extern void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info);
extern void team_options_change_check(struct team *team);

struct team_mode {
	const char *kind;
	struct module *owner;
	size_t priv_size;
	size_t port_priv_size;
	const struct team_mode_ops *ops;
	enum netdev_lag_tx_type lag_tx_type;
};
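
/*
 * A mode module fills in a static struct team_mode and registers it from
 * its module init, using team_mode_register() and MODULE_ALIAS_TEAM_MODE()
 * declared below. Illustrative sketch only; the my_mode_* names and the
 * private struct are hypothetical:
 *
 *	static const struct team_mode_ops my_mode_ops = {
 *		.transmit	= my_mode_transmit,
 *		.port_enter	= my_mode_port_enter,
 *	};
 *
 *	static const struct team_mode my_mode = {
 *		.kind		= "mymode",
 *		.owner		= THIS_MODULE,
 *		.priv_size	= sizeof(struct my_mode_priv),
 *		.ops		= &my_mode_ops,
 *		.lag_tx_type	= NETDEV_LAG_TX_TYPE_ROUNDROBIN,
 *	};
 *
 *	static int __init my_mode_init_module(void)
 *	{
 *		return team_mode_register(&my_mode);
 *	}
 *	MODULE_ALIAS_TEAM_MODE("mymode");
 */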

#define TEAM_PORT_HASHBITS 4
#define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS)

#define TEAM_MODE_PRIV_LONGS 4
#define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS)

struct team {
	struct net_device *dev; /* associated netdevice */
	struct team_pcpu_stats __percpu *pcpu_stats;

	const struct header_ops *header_ops_cache;

	struct mutex lock; /* used for overall locking, e.g. port lists write */

	/*
	 * List of enabled ports and their count
	 */
	int en_port_count;
	struct hlist_head en_port_hlist[TEAM_PORT_HASHENTRIES];

	struct list_head port_list; /* list of all ports */

	struct list_head option_list;
	struct list_head option_inst_list; /* list of option instances */

	const struct team_mode *mode;
	struct team_mode_ops ops;
	bool user_carrier_enabled;
	bool queue_override_enabled;
	struct list_head *qom_lists; /* array of queue override mapping lists */
	bool port_mtu_change_allowed;
	struct {
		unsigned int count;
		unsigned int interval; /* in ms */
		atomic_t count_pending;
		struct delayed_work dw;
	} notify_peers;
	struct {
		unsigned int count;
		unsigned int interval; /* in ms */
		atomic_t count_pending;
		struct delayed_work dw;
	} mcast_rejoin;
	long mode_priv[TEAM_MODE_PRIV_LONGS];
};

static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
				      struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	skb->dev = port->dev;
	if (unlikely(netpoll_tx_running(team->dev))) {
		team_netpoll_send_skb(port, skb);
		return 0;
	}
	return dev_queue_xmit(skb);
}
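
/*
 * team_dev_queue_xmit() is what a mode's transmit handler calls once it has
 * picked a port: it copies the slave queue mapping saved in the qdisc
 * control block back into the skb, retargets the skb at the port's
 * netdevice and hands it to dev_queue_xmit() (or to netpoll when a netpoll
 * transmit is in progress on the team device).
 */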

static inline struct hlist_head *team_port_index_hash(struct team *team,
						      int port_index)
{
	return &team->en_port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)];
}

static inline struct team_port *team_get_port_by_index(struct team *team,
						       int port_index)
{
	struct team_port *port;
	struct hlist_head *head = team_port_index_hash(team, port_index);

	hlist_for_each_entry(port, head, hlist)
		if (port->index == port_index)
			return port;
	return NULL;
}

static inline int team_num_to_port_index(struct team *team, unsigned int num)
{
	int en_port_count = READ_ONCE(team->en_port_count);

	if (unlikely(!en_port_count))
		return 0;
	return num % en_port_count;
}

static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
							   int port_index)
{
	struct team_port *port;
	struct hlist_head *head = team_port_index_hash(team, port_index);

	hlist_for_each_entry_rcu(port, head, hlist)
		if (port->index == port_index)
			return port;
	return NULL;
}

static inline struct team_port *
team_get_first_port_txable_rcu(struct team *team, struct team_port *port)
{
	struct team_port *cur;

	if (likely(team_port_txable(port)))
		return port;
	/* Scan the rest of the port list after @port ... */
	cur = port;
	list_for_each_entry_continue_rcu(cur, &team->port_list, list)
		if (team_port_txable(cur))
			return cur;
	/* ... then wrap around and scan from the head back up to @port. */
	list_for_each_entry_rcu(cur, &team->port_list, list) {
		if (cur == port)
			break;
		if (team_port_txable(cur))
			return cur;
	}
	return NULL;
}
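
/*
 * The helpers above are the usual building blocks of a transmit path. A
 * rough, illustrative sketch of a round-robin style transmit callback
 * (my_transmit and my_next_packet_num are hypothetical, loosely modelled
 * on the in-tree modes):
 *
 *	static bool my_transmit(struct team *team, struct sk_buff *skb)
 *	{
 *		struct team_port *port;
 *		int port_index;
 *
 *		port_index = team_num_to_port_index(team,
 *						    my_next_packet_num(team));
 *		port = team_get_port_by_index_rcu(team, port_index);
 *		if (unlikely(!port))
 *			goto drop;
 *		port = team_get_first_port_txable_rcu(team, port);
 *		if (unlikely(!port))
 *			goto drop;
 *		if (team_dev_queue_xmit(team, port, skb))
 *			return false;
 *		return true;
 *
 *	drop:
 *		dev_kfree_skb_any(skb);
 *		return false;
 *	}
 */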

extern int team_options_register(struct team *team,
				 const struct team_option *option,
				 size_t option_count);
extern void team_options_unregister(struct team *team,
				    const struct team_option *option,
				    size_t option_count);
extern int team_mode_register(const struct team_mode *mode);
extern void team_mode_unregister(const struct team_mode *mode);

#define TEAM_DEFAULT_NUM_TX_QUEUES 16
#define TEAM_DEFAULT_NUM_RX_QUEUES 16

#define MODULE_ALIAS_TEAM_MODE(kind) MODULE_ALIAS("team-mode-" kind)

#endif /* _LINUX_IF_TEAM_H_ */