]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/infiniband/core/nldev.c
RDMA/netlink: Export lids and sm_lids
[mirror_ubuntu-bionic-kernel.git] / drivers / infiniband / core / nldev.c
CommitLineData
6c80b41a
LR
1/*
2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. Neither the names of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * Alternatively, this software may be distributed under the terms of the
17 * GNU General Public License ("GPL") version 2 as published by the Free
18 * Software Foundation.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
b4c598a6 33#include <net/netlink.h>
6c80b41a
LR
34#include <rdma/rdma_netlink.h>
35
36#include "core_priv.h"
37
b4c598a6
LR
/*
 * Validation policy for RDMA_NL_NLDEV netlink attributes: constrains each
 * attribute's type (and maximum string length) before the doit/dumpit
 * handlers look at it.
 */
static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
	[RDMA_NLDEV_ATTR_DEV_INDEX]     = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DEV_NAME]	= { .type = NLA_NUL_STRING,
					    .len = IB_DEVICE_NAME_MAX - 1},
	[RDMA_NLDEV_ATTR_PORT_INDEX]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_FW_VERSION]	= { .type = NLA_NUL_STRING,
					    .len = IB_FW_VERSION_NAME_MAX - 1},
	[RDMA_NLDEV_ATTR_NODE_GUID]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SUBNET_PREFIX]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_LID]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_SM_LID]	= { .type = NLA_U32 },
};
51
52static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
53{
8621a7e3
LR
54 char fw[IB_FW_VERSION_NAME_MAX];
55
b4c598a6
LR
56 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
57 return -EMSGSIZE;
58 if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
59 return -EMSGSIZE;
60 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
61 return -EMSGSIZE;
ac505253
LR
62
63 BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
64 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
65 device->attrs.device_cap_flags, 0))
66 return -EMSGSIZE;
67
8621a7e3
LR
68 ib_get_device_fw_str(device, fw);
69 /* Device without FW has strlen(fw) */
70 if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
71 return -EMSGSIZE;
72
1aaff896
LR
73 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
74 be64_to_cpu(device->node_guid), 0))
75 return -EMSGSIZE;
76 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
77 be64_to_cpu(device->attrs.sys_image_guid), 0))
78 return -EMSGSIZE;
b4c598a6
LR
79 return 0;
80}
81
7d02f605
LR
82static int fill_port_info(struct sk_buff *msg,
83 struct ib_device *device, u32 port)
84{
ac505253
LR
85 struct ib_port_attr attr;
86 int ret;
87
7d02f605
LR
88 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
89 return -EMSGSIZE;
90 if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
91 return -EMSGSIZE;
92 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
93 return -EMSGSIZE;
ac505253
LR
94
95 ret = ib_query_port(device, port, &attr);
96 if (ret)
97 return ret;
98
99 BUILD_BUG_ON(sizeof(attr.port_cap_flags) > sizeof(u64));
100 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
101 (u64)attr.port_cap_flags, 0))
102 return -EMSGSIZE;
12026fbb
LR
103 if (rdma_protocol_ib(device, port) &&
104 nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
105 attr.subnet_prefix, 0))
106 return -EMSGSIZE;
80a06dd3
LR
107 if (rdma_protocol_ib(device, port)) {
108 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
109 return -EMSGSIZE;
110 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
111 return -EMSGSIZE;
112 }
7d02f605
LR
113 return 0;
114}
115
e5c9469e
LR
116static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
117 struct netlink_ext_ack *extack)
118{
119 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
120 struct ib_device *device;
121 struct sk_buff *msg;
122 u32 index;
123 int err;
124
125 err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
126 nldev_policy, extack);
127 if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
128 return -EINVAL;
129
130 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
131
132 device = __ib_device_get_by_index(index);
133 if (!device)
134 return -EINVAL;
135
136 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
137 if (!msg)
138 return -ENOMEM;
139
140 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
141 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
142 0, 0);
143
144 err = fill_dev_info(msg, device);
145 if (err) {
146 nlmsg_free(msg);
147 return err;
148 }
149
150 nlmsg_end(msg, nlh);
151
152 return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
153}
154
b4c598a6
LR
155static int _nldev_get_dumpit(struct ib_device *device,
156 struct sk_buff *skb,
157 struct netlink_callback *cb,
158 unsigned int idx)
159{
160 int start = cb->args[0];
161 struct nlmsghdr *nlh;
162
163 if (idx < start)
164 return 0;
165
166 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
167 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
168 0, NLM_F_MULTI);
169
170 if (fill_dev_info(skb, device)) {
171 nlmsg_cancel(skb, nlh);
172 goto out;
173 }
174
175 nlmsg_end(skb, nlh);
176
177 idx++;
178
179out: cb->args[0] = idx;
180 return skb->len;
181}
182
183static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
184{
185 /*
186 * There is no need to take lock, because
187 * we are relying on ib_core's lists_rwsem
188 */
189 return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
190}
191
c3f66f7b
LR
192static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
193 struct netlink_ext_ack *extack)
194{
195 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
196 struct ib_device *device;
197 struct sk_buff *msg;
198 u32 index;
199 u32 port;
200 int err;
201
202 err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
203 nldev_policy, extack);
204 if (err || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
205 return -EINVAL;
206
207 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
208 device = __ib_device_get_by_index(index);
209 if (!device)
210 return -EINVAL;
211
212 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
213 if (!rdma_is_port_valid(device, port))
214 return -EINVAL;
215
216 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
217 if (!msg)
218 return -ENOMEM;
219
220 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
221 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
222 0, 0);
223
224 err = fill_port_info(msg, device, port);
225 if (err) {
226 nlmsg_free(msg);
227 return err;
228 }
229
230 nlmsg_end(msg, nlh);
231
232 return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
233}
234
7d02f605
LR
235static int nldev_port_get_dumpit(struct sk_buff *skb,
236 struct netlink_callback *cb)
237{
238 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
239 struct ib_device *device;
240 int start = cb->args[0];
241 struct nlmsghdr *nlh;
242 u32 idx = 0;
243 u32 ifindex;
244 int err;
245 u32 p;
246
247 err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
248 nldev_policy, NULL);
249 if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
250 return -EINVAL;
251
252 ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
253 device = __ib_device_get_by_index(ifindex);
254 if (!device)
255 return -EINVAL;
256
257 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
258 /*
259 * The dumpit function returns all information from specific
260 * index. This specific index is taken from the netlink
261 * messages request sent by user and it is available
262 * in cb->args[0].
263 *
264 * Usually, the user doesn't fill this field and it causes
265 * to return everything.
266 *
267 */
268 if (idx < start) {
269 idx++;
270 continue;
271 }
272
273 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
274 cb->nlh->nlmsg_seq,
275 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
276 RDMA_NLDEV_CMD_PORT_GET),
277 0, NLM_F_MULTI);
278
279 if (fill_port_info(skb, device, p)) {
280 nlmsg_cancel(skb, nlh);
281 goto out;
282 }
283 idx++;
284 nlmsg_end(skb, nlh);
285 }
286
287out: cb->args[0] = idx;
288 return skb->len;
289}
290
b4c598a6
LR
/*
 * Command dispatch table for the RDMA_NL_NLDEV netlink family: maps each
 * command to its unicast (doit) and dump (dumpit) handlers.
 */
static const struct rdma_nl_cbs nldev_cb_table[] = {
	[RDMA_NLDEV_CMD_GET] = {
		.doit = nldev_get_doit,
		.dump = nldev_get_dumpit,
	},
	[RDMA_NLDEV_CMD_PORT_GET] = {
		.doit = nldev_port_get_doit,
		.dump = nldev_port_get_dumpit,
	},
};
301
6c80b41a
LR
/* Register the RDMA_NL_NLDEV netlink family with its callback table. */
void __init nldev_init(void)
{
	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}
306
/* Unregister the RDMA_NL_NLDEV netlink family on module teardown. */
void __exit nldev_exit(void)
{
	rdma_nl_unregister(RDMA_NL_NLDEV);
}