/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 - 2021 Intel Corporation
 */
#include <net/cfg80211.h>
12 static int pmsr_parse_ftm(struct cfg80211_registered_device
*rdev
,
13 struct nlattr
*ftmreq
,
14 struct cfg80211_pmsr_request_peer
*out
,
15 struct genl_info
*info
)
17 const struct cfg80211_pmsr_capabilities
*capa
= rdev
->wiphy
.pmsr_capa
;
18 struct nlattr
*tb
[NL80211_PMSR_FTM_REQ_ATTR_MAX
+ 1];
19 u32 preamble
= NL80211_PREAMBLE_DMG
; /* only optional in DMG */
21 /* validate existing data */
22 if (!(rdev
->wiphy
.pmsr_capa
->ftm
.bandwidths
& BIT(out
->chandef
.width
))) {
23 NL_SET_ERR_MSG(info
->extack
, "FTM: unsupported bandwidth");
27 /* no validation needed - was already done via nested policy */
28 nla_parse_nested_deprecated(tb
, NL80211_PMSR_FTM_REQ_ATTR_MAX
, ftmreq
,
31 if (tb
[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE
])
32 preamble
= nla_get_u32(tb
[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE
]);
34 /* set up values - struct is 0-initialized */
35 out
->ftm
.requested
= true;
37 switch (out
->chandef
.chan
->band
) {
38 case NL80211_BAND_60GHZ
:
42 if (!tb
[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE
]) {
43 NL_SET_ERR_MSG(info
->extack
,
44 "FTM: must specify preamble");
49 if (!(capa
->ftm
.preambles
& BIT(preamble
))) {
50 NL_SET_ERR_MSG_ATTR(info
->extack
,
51 tb
[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE
],
52 "FTM: invalid preamble");
56 out
->ftm
.preamble
= preamble
;
58 out
->ftm
.burst_period
= 0;
59 if (tb
[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD
])
60 out
->ftm
.burst_period
=
61 nla_get_u32(tb
[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD
]);
63 out
->ftm
.asap
= !!tb
[NL80211_PMSR_FTM_REQ_ATTR_ASAP
];
64 if (out
->ftm
.asap
&& !capa
->ftm
.asap
) {
65 NL_SET_ERR_MSG_ATTR(info
->extack
,
66 tb
[NL80211_PMSR_FTM_REQ_ATTR_ASAP
],
67 "FTM: ASAP mode not supported");
71 if (!out
->ftm
.asap
&& !capa
->ftm
.non_asap
) {
72 NL_SET_ERR_MSG(info
->extack
,
73 "FTM: non-ASAP mode not supported");
77 out
->ftm
.num_bursts_exp
= 0;
78 if (tb
[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP
])
79 out
->ftm
.num_bursts_exp
=
80 nla_get_u32(tb
[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP
]);
82 if (capa
->ftm
.max_bursts_exponent
>= 0 &&
83 out
->ftm
.num_bursts_exp
> capa
->ftm
.max_bursts_exponent
) {
84 NL_SET_ERR_MSG_ATTR(info
->extack
,
85 tb
[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP
],
86 "FTM: max NUM_BURSTS_EXP must be set lower than the device limit");
90 out
->ftm
.burst_duration
= 15;
91 if (tb
[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION
])
92 out
->ftm
.burst_duration
=
93 nla_get_u32(tb
[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION
]);
95 out
->ftm
.ftms_per_burst
= 0;
96 if (tb
[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST
])
97 out
->ftm
.ftms_per_burst
=
98 nla_get_u32(tb
[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST
]);
100 if (capa
->ftm
.max_ftms_per_burst
&&
101 (out
->ftm
.ftms_per_burst
> capa
->ftm
.max_ftms_per_burst
||
102 out
->ftm
.ftms_per_burst
== 0)) {
103 NL_SET_ERR_MSG_ATTR(info
->extack
,
104 tb
[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST
],
105 "FTM: FTMs per burst must be set lower than the device limit but non-zero");
109 out
->ftm
.ftmr_retries
= 3;
110 if (tb
[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES
])
111 out
->ftm
.ftmr_retries
=
112 nla_get_u32(tb
[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES
]);
114 out
->ftm
.request_lci
= !!tb
[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI
];
115 if (out
->ftm
.request_lci
&& !capa
->ftm
.request_lci
) {
116 NL_SET_ERR_MSG_ATTR(info
->extack
,
117 tb
[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI
],
118 "FTM: LCI request not supported");
121 out
->ftm
.request_civicloc
=
122 !!tb
[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC
];
123 if (out
->ftm
.request_civicloc
&& !capa
->ftm
.request_civicloc
) {
124 NL_SET_ERR_MSG_ATTR(info
->extack
,
125 tb
[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC
],
126 "FTM: civic location request not supported");
129 out
->ftm
.trigger_based
=
130 !!tb
[NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED
];
131 if (out
->ftm
.trigger_based
&& !capa
->ftm
.trigger_based
) {
132 NL_SET_ERR_MSG_ATTR(info
->extack
,
133 tb
[NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED
],
134 "FTM: trigger based ranging is not supported");
138 out
->ftm
.non_trigger_based
=
139 !!tb
[NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED
];
140 if (out
->ftm
.non_trigger_based
&& !capa
->ftm
.non_trigger_based
) {
141 NL_SET_ERR_MSG_ATTR(info
->extack
,
142 tb
[NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED
],
143 "FTM: trigger based ranging is not supported");
147 if (out
->ftm
.trigger_based
&& out
->ftm
.non_trigger_based
) {
148 NL_SET_ERR_MSG(info
->extack
,
149 "FTM: can't set both trigger based and non trigger based");
153 if ((out
->ftm
.trigger_based
|| out
->ftm
.non_trigger_based
) &&
154 out
->ftm
.preamble
!= NL80211_PREAMBLE_HE
) {
155 NL_SET_ERR_MSG_ATTR(info
->extack
,
156 tb
[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE
],
157 "FTM: non EDCA based ranging must use HE preamble");
161 out
->ftm
.lmr_feedback
=
162 !!tb
[NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK
];
163 if (!out
->ftm
.trigger_based
&& !out
->ftm
.non_trigger_based
&&
164 out
->ftm
.lmr_feedback
) {
165 NL_SET_ERR_MSG_ATTR(info
->extack
,
166 tb
[NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK
],
167 "FTM: LMR feedback set for EDCA based ranging");
174 static int pmsr_parse_peer(struct cfg80211_registered_device
*rdev
,
176 struct cfg80211_pmsr_request_peer
*out
,
177 struct genl_info
*info
)
179 struct nlattr
*tb
[NL80211_PMSR_PEER_ATTR_MAX
+ 1];
180 struct nlattr
*req
[NL80211_PMSR_REQ_ATTR_MAX
+ 1];
184 /* no validation needed - was already done via nested policy */
185 nla_parse_nested_deprecated(tb
, NL80211_PMSR_PEER_ATTR_MAX
, peer
,
188 if (!tb
[NL80211_PMSR_PEER_ATTR_ADDR
] ||
189 !tb
[NL80211_PMSR_PEER_ATTR_CHAN
] ||
190 !tb
[NL80211_PMSR_PEER_ATTR_REQ
]) {
191 NL_SET_ERR_MSG_ATTR(info
->extack
, peer
,
192 "insufficient peer data");
196 memcpy(out
->addr
, nla_data(tb
[NL80211_PMSR_PEER_ATTR_ADDR
]), ETH_ALEN
);
198 /* reuse info->attrs */
199 memset(info
->attrs
, 0, sizeof(*info
->attrs
) * (NL80211_ATTR_MAX
+ 1));
200 err
= nla_parse_nested_deprecated(info
->attrs
, NL80211_ATTR_MAX
,
201 tb
[NL80211_PMSR_PEER_ATTR_CHAN
],
206 err
= nl80211_parse_chandef(rdev
, info
, &out
->chandef
);
210 /* no validation needed - was already done via nested policy */
211 nla_parse_nested_deprecated(req
, NL80211_PMSR_REQ_ATTR_MAX
,
212 tb
[NL80211_PMSR_PEER_ATTR_REQ
], NULL
,
215 if (!req
[NL80211_PMSR_REQ_ATTR_DATA
]) {
216 NL_SET_ERR_MSG_ATTR(info
->extack
,
217 tb
[NL80211_PMSR_PEER_ATTR_REQ
],
218 "missing request type/data");
222 if (req
[NL80211_PMSR_REQ_ATTR_GET_AP_TSF
])
223 out
->report_ap_tsf
= true;
225 if (out
->report_ap_tsf
&& !rdev
->wiphy
.pmsr_capa
->report_ap_tsf
) {
226 NL_SET_ERR_MSG_ATTR(info
->extack
,
227 req
[NL80211_PMSR_REQ_ATTR_GET_AP_TSF
],
228 "reporting AP TSF is not supported");
232 nla_for_each_nested(treq
, req
[NL80211_PMSR_REQ_ATTR_DATA
], rem
) {
233 switch (nla_type(treq
)) {
234 case NL80211_PMSR_TYPE_FTM
:
235 err
= pmsr_parse_ftm(rdev
, treq
, out
, info
);
238 NL_SET_ERR_MSG_ATTR(info
->extack
, treq
,
239 "unsupported measurement type");
250 int nl80211_pmsr_start(struct sk_buff
*skb
, struct genl_info
*info
)
252 struct nlattr
*reqattr
= info
->attrs
[NL80211_ATTR_PEER_MEASUREMENTS
];
253 struct cfg80211_registered_device
*rdev
= info
->user_ptr
[0];
254 struct wireless_dev
*wdev
= info
->user_ptr
[1];
255 struct cfg80211_pmsr_request
*req
;
256 struct nlattr
*peers
, *peer
;
257 int count
, rem
, err
, idx
;
259 if (!rdev
->wiphy
.pmsr_capa
)
265 peers
= nla_find(nla_data(reqattr
), nla_len(reqattr
),
266 NL80211_PMSR_ATTR_PEERS
);
271 nla_for_each_nested(peer
, peers
, rem
) {
274 if (count
> rdev
->wiphy
.pmsr_capa
->max_peers
) {
275 NL_SET_ERR_MSG_ATTR(info
->extack
, peer
,
276 "Too many peers used");
281 req
= kzalloc(struct_size(req
, peers
, count
), GFP_KERNEL
);
285 if (info
->attrs
[NL80211_ATTR_TIMEOUT
])
286 req
->timeout
= nla_get_u32(info
->attrs
[NL80211_ATTR_TIMEOUT
]);
288 if (info
->attrs
[NL80211_ATTR_MAC
]) {
289 if (!rdev
->wiphy
.pmsr_capa
->randomize_mac_addr
) {
290 NL_SET_ERR_MSG_ATTR(info
->extack
,
291 info
->attrs
[NL80211_ATTR_MAC
],
292 "device cannot randomize MAC address");
297 err
= nl80211_parse_random_mac(info
->attrs
, req
->mac_addr
,
302 memcpy(req
->mac_addr
, wdev_address(wdev
), ETH_ALEN
);
303 eth_broadcast_addr(req
->mac_addr_mask
);
307 nla_for_each_nested(peer
, peers
, rem
) {
308 /* NB: this reuses info->attrs, but we no longer need it */
309 err
= pmsr_parse_peer(rdev
, peer
, &req
->peers
[idx
], info
);
315 req
->n_peers
= count
;
316 req
->cookie
= cfg80211_assign_cookie(rdev
);
317 req
->nl_portid
= info
->snd_portid
;
319 err
= rdev_start_pmsr(rdev
, wdev
, req
);
323 list_add_tail(&req
->list
, &wdev
->pmsr_list
);
325 nl_set_extack_cookie_u64(info
->extack
, req
->cookie
);
332 void cfg80211_pmsr_complete(struct wireless_dev
*wdev
,
333 struct cfg80211_pmsr_request
*req
,
336 struct cfg80211_registered_device
*rdev
= wiphy_to_rdev(wdev
->wiphy
);
340 trace_cfg80211_pmsr_complete(wdev
->wiphy
, wdev
, req
->cookie
);
342 msg
= nlmsg_new(NLMSG_DEFAULT_SIZE
, gfp
);
346 hdr
= nl80211hdr_put(msg
, 0, 0, 0,
347 NL80211_CMD_PEER_MEASUREMENT_COMPLETE
);
351 if (nla_put_u32(msg
, NL80211_ATTR_WIPHY
, rdev
->wiphy_idx
) ||
352 nla_put_u64_64bit(msg
, NL80211_ATTR_WDEV
, wdev_id(wdev
),
356 if (nla_put_u64_64bit(msg
, NL80211_ATTR_COOKIE
, req
->cookie
,
360 genlmsg_end(msg
, hdr
);
361 genlmsg_unicast(wiphy_net(wdev
->wiphy
), msg
, req
->nl_portid
);
366 spin_lock_bh(&wdev
->pmsr_lock
);
367 list_del(&req
->list
);
368 spin_unlock_bh(&wdev
->pmsr_lock
);
371 EXPORT_SYMBOL_GPL(cfg80211_pmsr_complete
);
373 static int nl80211_pmsr_send_ftm_res(struct sk_buff
*msg
,
374 struct cfg80211_pmsr_result
*res
)
376 if (res
->status
== NL80211_PMSR_STATUS_FAILURE
) {
377 if (nla_put_u32(msg
, NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON
,
378 res
->ftm
.failure_reason
))
381 if (res
->ftm
.failure_reason
==
382 NL80211_PMSR_FTM_FAILURE_PEER_BUSY
&&
383 res
->ftm
.busy_retry_time
&&
384 nla_put_u32(msg
, NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME
,
385 res
->ftm
.busy_retry_time
))
391 #define PUT(tp, attr, val) \
393 if (nla_put_##tp(msg, \
394 NL80211_PMSR_FTM_RESP_ATTR_##attr, \
399 #define PUTOPT(tp, attr, val) \
401 if (res->ftm.val##_valid) \
402 PUT(tp, attr, val); \
405 #define PUT_U64(attr, val) \
407 if (nla_put_u64_64bit(msg, \
408 NL80211_PMSR_FTM_RESP_ATTR_##attr,\
410 NL80211_PMSR_FTM_RESP_ATTR_PAD)) \
414 #define PUTOPT_U64(attr, val) \
416 if (res->ftm.val##_valid) \
417 PUT_U64(attr, val); \
420 if (res
->ftm
.burst_index
>= 0)
421 PUT(u32
, BURST_INDEX
, burst_index
);
422 PUTOPT(u32
, NUM_FTMR_ATTEMPTS
, num_ftmr_attempts
);
423 PUTOPT(u32
, NUM_FTMR_SUCCESSES
, num_ftmr_successes
);
424 PUT(u8
, NUM_BURSTS_EXP
, num_bursts_exp
);
425 PUT(u8
, BURST_DURATION
, burst_duration
);
426 PUT(u8
, FTMS_PER_BURST
, ftms_per_burst
);
427 PUTOPT(s32
, RSSI_AVG
, rssi_avg
);
428 PUTOPT(s32
, RSSI_SPREAD
, rssi_spread
);
429 if (res
->ftm
.tx_rate_valid
&&
430 !nl80211_put_sta_rate(msg
, &res
->ftm
.tx_rate
,
431 NL80211_PMSR_FTM_RESP_ATTR_TX_RATE
))
433 if (res
->ftm
.rx_rate_valid
&&
434 !nl80211_put_sta_rate(msg
, &res
->ftm
.rx_rate
,
435 NL80211_PMSR_FTM_RESP_ATTR_RX_RATE
))
437 PUTOPT_U64(RTT_AVG
, rtt_avg
);
438 PUTOPT_U64(RTT_VARIANCE
, rtt_variance
);
439 PUTOPT_U64(RTT_SPREAD
, rtt_spread
);
440 PUTOPT_U64(DIST_AVG
, dist_avg
);
441 PUTOPT_U64(DIST_VARIANCE
, dist_variance
);
442 PUTOPT_U64(DIST_SPREAD
, dist_spread
);
443 if (res
->ftm
.lci
&& res
->ftm
.lci_len
&&
444 nla_put(msg
, NL80211_PMSR_FTM_RESP_ATTR_LCI
,
445 res
->ftm
.lci_len
, res
->ftm
.lci
))
447 if (res
->ftm
.civicloc
&& res
->ftm
.civicloc_len
&&
448 nla_put(msg
, NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC
,
449 res
->ftm
.civicloc_len
, res
->ftm
.civicloc
))
461 static int nl80211_pmsr_send_result(struct sk_buff
*msg
,
462 struct cfg80211_pmsr_result
*res
)
464 struct nlattr
*pmsr
, *peers
, *peer
, *resp
, *data
, *typedata
;
466 pmsr
= nla_nest_start_noflag(msg
, NL80211_ATTR_PEER_MEASUREMENTS
);
470 peers
= nla_nest_start_noflag(msg
, NL80211_PMSR_ATTR_PEERS
);
474 peer
= nla_nest_start_noflag(msg
, 1);
478 if (nla_put(msg
, NL80211_PMSR_PEER_ATTR_ADDR
, ETH_ALEN
, res
->addr
))
481 resp
= nla_nest_start_noflag(msg
, NL80211_PMSR_PEER_ATTR_RESP
);
485 if (nla_put_u32(msg
, NL80211_PMSR_RESP_ATTR_STATUS
, res
->status
) ||
486 nla_put_u64_64bit(msg
, NL80211_PMSR_RESP_ATTR_HOST_TIME
,
487 res
->host_time
, NL80211_PMSR_RESP_ATTR_PAD
))
490 if (res
->ap_tsf_valid
&&
491 nla_put_u64_64bit(msg
, NL80211_PMSR_RESP_ATTR_AP_TSF
,
492 res
->ap_tsf
, NL80211_PMSR_RESP_ATTR_PAD
))
495 if (res
->final
&& nla_put_flag(msg
, NL80211_PMSR_RESP_ATTR_FINAL
))
498 data
= nla_nest_start_noflag(msg
, NL80211_PMSR_RESP_ATTR_DATA
);
502 typedata
= nla_nest_start_noflag(msg
, res
->type
);
507 case NL80211_PMSR_TYPE_FTM
:
508 if (nl80211_pmsr_send_ftm_res(msg
, res
))
515 nla_nest_end(msg
, typedata
);
516 nla_nest_end(msg
, data
);
517 nla_nest_end(msg
, resp
);
518 nla_nest_end(msg
, peer
);
519 nla_nest_end(msg
, peers
);
520 nla_nest_end(msg
, pmsr
);
527 void cfg80211_pmsr_report(struct wireless_dev
*wdev
,
528 struct cfg80211_pmsr_request
*req
,
529 struct cfg80211_pmsr_result
*result
,
532 struct cfg80211_registered_device
*rdev
= wiphy_to_rdev(wdev
->wiphy
);
537 trace_cfg80211_pmsr_report(wdev
->wiphy
, wdev
, req
->cookie
,
541 * Currently, only variable items are LCI and civic location,
542 * both of which are reasonably short so we don't need to
543 * worry about them here for the allocation.
545 msg
= nlmsg_new(NLMSG_DEFAULT_SIZE
, gfp
);
549 hdr
= nl80211hdr_put(msg
, 0, 0, 0, NL80211_CMD_PEER_MEASUREMENT_RESULT
);
553 if (nla_put_u32(msg
, NL80211_ATTR_WIPHY
, rdev
->wiphy_idx
) ||
554 nla_put_u64_64bit(msg
, NL80211_ATTR_WDEV
, wdev_id(wdev
),
558 if (nla_put_u64_64bit(msg
, NL80211_ATTR_COOKIE
, req
->cookie
,
562 err
= nl80211_pmsr_send_result(msg
, result
);
564 pr_err_ratelimited("peer measurement result: message didn't fit!");
568 genlmsg_end(msg
, hdr
);
569 genlmsg_unicast(wiphy_net(wdev
->wiphy
), msg
, req
->nl_portid
);
574 EXPORT_SYMBOL_GPL(cfg80211_pmsr_report
);
576 static void cfg80211_pmsr_process_abort(struct wireless_dev
*wdev
)
578 struct cfg80211_registered_device
*rdev
= wiphy_to_rdev(wdev
->wiphy
);
579 struct cfg80211_pmsr_request
*req
, *tmp
;
580 LIST_HEAD(free_list
);
582 lockdep_assert_held(&wdev
->mtx
);
584 spin_lock_bh(&wdev
->pmsr_lock
);
585 list_for_each_entry_safe(req
, tmp
, &wdev
->pmsr_list
, list
) {
588 list_move_tail(&req
->list
, &free_list
);
590 spin_unlock_bh(&wdev
->pmsr_lock
);
592 list_for_each_entry_safe(req
, tmp
, &free_list
, list
) {
593 rdev_abort_pmsr(rdev
, wdev
, req
);
599 void cfg80211_pmsr_free_wk(struct work_struct
*work
)
601 struct wireless_dev
*wdev
= container_of(work
, struct wireless_dev
,
605 cfg80211_pmsr_process_abort(wdev
);
609 void cfg80211_pmsr_wdev_down(struct wireless_dev
*wdev
)
611 struct cfg80211_pmsr_request
*req
;
614 spin_lock_bh(&wdev
->pmsr_lock
);
615 list_for_each_entry(req
, &wdev
->pmsr_list
, list
) {
619 spin_unlock_bh(&wdev
->pmsr_lock
);
622 cfg80211_pmsr_process_abort(wdev
);
624 WARN_ON(!list_empty(&wdev
->pmsr_list
));
627 void cfg80211_release_pmsr(struct wireless_dev
*wdev
, u32 portid
)
629 struct cfg80211_pmsr_request
*req
;
631 spin_lock_bh(&wdev
->pmsr_lock
);
632 list_for_each_entry(req
, &wdev
->pmsr_list
, list
) {
633 if (req
->nl_portid
== portid
) {
635 schedule_work(&wdev
->pmsr_free_wk
);
638 spin_unlock_bh(&wdev
->pmsr_lock
);
641 #endif /* __PMSR_H */