/*
 * include/linux/if_team.h - Network team device driver header
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#ifndef _LINUX_IF_TEAM_H_
#define _LINUX_IF_TEAM_H_

#include <linux/netpoll.h>
#include <net/sch_generic.h>
#include <uapi/linux/if_team.h>
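
/* Per-CPU RX/TX packet and byte counters; the 64-bit counters are
 * read and updated under syncp (u64_stats_sync).
 */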
struct team_pcpu_stats {
        u64 rx_packets;
        u64 rx_bytes;
        u64 rx_multicast;
        u64 tx_packets;
        u64 tx_bytes;
        struct u64_stats_sync syncp;
        u32 rx_dropped;
        u32 tx_dropped;
};

struct team;
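
/*
 * Runtime state of one port (slave netdevice) attached to a team.
 * Every port is on team->port_list; enabled ports are additionally
 * hashed in team->en_port_hlist by their index.
 */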
struct team_port {
        struct net_device *dev;
        struct hlist_node hlist; /* node in enabled ports hash list */
        struct list_head list; /* node in ordinary list */
        struct team *team;
        int index; /* index of enabled port. If disabled, it's set to -1 */

        bool linkup; /* either state.linkup or user.linkup */

        struct {
                bool linkup;
                u32 speed;
                u8 duplex;
        } state;

        /* Values set by userspace */
        struct {
                bool linkup;
                bool linkup_enabled;
        } user;

        /* Custom gennetlink interface related flags */
        bool changed;
        bool removed;

        /*
         * A place for storing original values of the device before it
         * becomes a port.
         */
        struct {
                unsigned char dev_addr[MAX_ADDR_LEN];
                unsigned int mtu;
        } orig;

#ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll *np;
#endif

        s32 priority; /* lower number ~ higher priority */
        u16 queue_id;
        struct list_head qom_list; /* node in queue override mapping list */
        long mode_priv[0];
};
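
/*
 * A port can carry traffic only if it is enabled (has a valid index)
 * and its link is up.
 */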
static inline bool team_port_enabled(struct team_port *port)
{
        return port->index != -1;
}

static inline bool team_port_txable(struct team_port *port)
{
        return port->linkup && team_port_enabled(port);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static inline void team_netpoll_send_skb(struct team_port *port,
                                         struct sk_buff *skb)
{
        struct netpoll *np = port->np;

        if (np)
                netpoll_send_skb(np, skb);
}
#else
static inline void team_netpoll_send_skb(struct team_port *port,
                                         struct sk_buff *skb)
{
}
#endif
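
/*
 * Callbacks implemented by a team mode (e.g. round-robin, broadcast,
 * active-backup). ->transmit and ->receive do the per-packet work;
 * the remaining hooks follow mode and port lifecycle events.
 */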
struct team_mode_ops {
        int (*init)(struct team *team);
        void (*exit)(struct team *team);
        rx_handler_result_t (*receive)(struct team *team,
                                       struct team_port *port,
                                       struct sk_buff *skb);
        bool (*transmit)(struct team *team, struct sk_buff *skb);
        int (*port_enter)(struct team *team, struct team_port *port);
        void (*port_leave)(struct team *team, struct team_port *port);
        void (*port_change_dev_addr)(struct team *team, struct team_port *port);
        void (*port_enabled)(struct team *team, struct team_port *port);
        void (*port_disabled)(struct team *team, struct team_port *port);
};

enum team_option_type {
        TEAM_OPTION_TYPE_U32,
        TEAM_OPTION_TYPE_STRING,
        TEAM_OPTION_TYPE_BINARY,
        TEAM_OPTION_TYPE_BOOL,
        TEAM_OPTION_TYPE_S32,
};

struct team_option_inst_info {
        u32 array_index;
        struct team_port *port; /* != NULL if per-port */
};
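
/*
 * Context handed to option getters/setters; the data union is
 * interpreted according to the option's team_option_type.
 */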
struct team_gsetter_ctx {
        union {
                u32 u32_val;
                const char *str_val;
                struct {
                        const void *ptr;
                        u32 len;
                } bin_val;
                bool bool_val;
                s32 s32_val;
        } data;
        struct team_option_inst_info *info;
};
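
/*
 * Description of one team option exposed over the team genetlink
 * interface; may be global or per-port and optionally an array.
 */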
struct team_option {
        struct list_head list;
        const char *name;
        bool per_port;
        unsigned int array_size; /* != 0 means the option is array */
        enum team_option_type type;
        int (*init)(struct team *team, struct team_option_inst_info *info);
        int (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
        int (*setter)(struct team *team, struct team_gsetter_ctx *ctx);
};

extern void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info);
extern void team_options_change_check(struct team *team);
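
/*
 * Static description of a team mode, registered by a mode module via
 * team_mode_register(). priv_size and port_priv_size tell the core how
 * much mode-private space to reserve per team and per port.
 */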
struct team_mode {
        const char *kind;
        struct module *owner;
        size_t priv_size;
        size_t port_priv_size;
        const struct team_mode_ops *ops;
};

#define TEAM_PORT_HASHBITS 4
#define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS)

#define TEAM_MODE_PRIV_LONGS 4
#define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS)
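
/*
 * Per-device state of a team master. Writers of the port lists hold
 * team->lock; the enabled-port hash is also walked under RCU on the
 * packet fast path.
 */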
struct team {
        struct net_device *dev; /* associated netdevice */
        struct team_pcpu_stats __percpu *pcpu_stats;

        struct mutex lock; /* used for overall locking, e.g. port lists write */

        /*
         * List of enabled ports and their count
         */
        int en_port_count;
        struct hlist_head en_port_hlist[TEAM_PORT_HASHENTRIES];

        struct list_head port_list; /* list of all ports */

        struct list_head option_list;
        struct list_head option_inst_list; /* list of option instances */

        const struct team_mode *mode;
        struct team_mode_ops ops;
        bool user_carrier_enabled;
        bool queue_override_enabled;
        struct list_head *qom_lists; /* array of queue override mapping lists */
        long mode_priv[TEAM_MODE_PRIV_LONGS];
};
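
/*
 * Hand the skb to the given port's underlying device, preserving the
 * queue mapping chosen by the master's qdisc layer; falls back to
 * netpoll transmission when netpoll is running on the team device.
 */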
static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
                                      struct sk_buff *skb)
{
        BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
                     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
        skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

        skb->dev = port->dev;
        if (unlikely(netpoll_tx_running(team->dev))) {
                team_netpoll_send_skb(port, skb);
                return 0;
        }
        return dev_queue_xmit(skb);
}
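
/*
 * Enabled ports are hashed by index. The plain lookup walks the hash
 * list directly; the _rcu variant walks it under RCU and is meant for
 * the packet fast path.
 */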
static inline struct hlist_head *team_port_index_hash(struct team *team,
                                                      int port_index)
{
        return &team->en_port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)];
}

static inline struct team_port *team_get_port_by_index(struct team *team,
                                                        int port_index)
{
        struct hlist_node *p;
        struct team_port *port;
        struct hlist_head *head = team_port_index_hash(team, port_index);

        hlist_for_each_entry(port, p, head, hlist)
                if (port->index == port_index)
                        return port;
        return NULL;
}

static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
                                                            int port_index)
{
        struct hlist_node *p;
        struct team_port *port;
        struct hlist_head *head = team_port_index_hash(team, port_index);

        hlist_for_each_entry_rcu(port, p, head, hlist)
                if (port->index == port_index)
                        return port;
        return NULL;
}

extern int team_port_set_team_dev_addr(struct team_port *port);
extern int team_options_register(struct team *team,
                                 const struct team_option *option,
                                 size_t option_count);
extern void team_options_unregister(struct team *team,
                                    const struct team_option *option,
                                    size_t option_count);
extern int team_mode_register(const struct team_mode *mode);
extern void team_mode_unregister(const struct team_mode *mode);

#define TEAM_DEFAULT_NUM_TX_QUEUES 16
#define TEAM_DEFAULT_NUM_RX_QUEUES 16
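
/*
 * Illustrative sketch only, not part of this header: a minimal mode
 * module fills in team_mode_ops and team_mode and registers them from
 * its module init. The names example_transmit, example_mode_ops and
 * example_mode below are hypothetical.
 *
 *      static bool example_transmit(struct team *team, struct sk_buff *skb)
 *      {
 *              struct team_port *port;
 *
 *              port = team_get_port_by_index_rcu(team, 0);
 *              if (!port || !team_port_txable(port)) {
 *                      dev_kfree_skb_any(skb);
 *                      return false;
 *              }
 *              return team_dev_queue_xmit(team, port, skb) == 0;
 *      }
 *
 *      static const struct team_mode_ops example_mode_ops = {
 *              .transmit       = example_transmit,
 *      };
 *
 *      static const struct team_mode example_mode = {
 *              .kind           = "example",
 *              .owner          = THIS_MODULE,
 *              .ops            = &example_mode_ops,
 *      };
 *
 * Calling team_mode_register(&example_mode) from module init and
 * team_mode_unregister(&example_mode) from module exit completes the
 * registration.
 */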
#endif /* _LINUX_IF_TEAM_H_ */