drivers/firewire/core-topology.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Incremental bus scan, based on bus topology
 *
 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/atomic.h>
#include <asm/byteorder.h>

#include "core.h"

#define SELF_ID_PHY_ID(q)         (((q) >> 24) & 0x3f)
#define SELF_ID_EXTENDED(q)       (((q) >> 23) & 0x01)
#define SELF_ID_LINK_ON(q)        (((q) >> 22) & 0x01)
#define SELF_ID_GAP_COUNT(q)      (((q) >> 16) & 0x3f)
#define SELF_ID_PHY_SPEED(q)      (((q) >> 14) & 0x03)
#define SELF_ID_CONTENDER(q)      (((q) >> 11) & 0x01)
#define SELF_ID_PHY_INITIATOR(q)  (((q) >> 1) & 0x01)
#define SELF_ID_MORE_PACKETS(q)   (((q) >> 0) & 0x01)

#define SELF_ID_EXT_SEQUENCE(q)   (((q) >> 20) & 0x07)

#define SELFID_PORT_CHILD   0x3
#define SELFID_PORT_PARENT  0x2
#define SELFID_PORT_NCONN   0x1
#define SELFID_PORT_NONE    0x0

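/*
 * Parse the (possibly extended) self ID sequence of a single node starting
 * at @sid.  Fills in the total number of ports and the number of ports that
 * connect to child nodes, and returns a pointer to the quadlet following
 * this node's self IDs, or NULL if the extended self IDs are inconsistent.
 */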
static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
{
        u32 q;
        int port_type, shift, seq;

        *total_port_count = 0;
        *child_port_count = 0;

        shift = 6;
        q = *sid;
        seq = 0;

        while (1) {
                port_type = (q >> shift) & 0x03;
                switch (port_type) {
                case SELFID_PORT_CHILD:
                        (*child_port_count)++;
                        fallthrough;
                case SELFID_PORT_PARENT:
                case SELFID_PORT_NCONN:
                        (*total_port_count)++;
                        fallthrough;
                case SELFID_PORT_NONE:
                        break;
                }

                shift -= 2;
                if (shift == 0) {
                        if (!SELF_ID_MORE_PACKETS(q))
                                return sid + 1;

                        shift = 16;
                        sid++;
                        q = *sid;

                        /*
                         * Check that the extra packets actually are
                         * extended self ID packets and that the
                         * sequence numbers in the extended self ID
                         * packets increase as expected.
                         */

                        if (!SELF_ID_EXTENDED(q) ||
                            seq != SELF_ID_EXT_SEQUENCE(q))
                                return NULL;

                        seq++;
                }
        }
}

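/*
 * Return the two-bit connectivity code (SELFID_PORT_*) of @port_index from
 * a node's self ID sequence: ports 0-2 occupy bits 7:6, 5:4 and 3:2 of the
 * base self ID quadlet, and each further group of eight ports is packed
 * into an extended self ID quadlet starting at bits 17:16, which is what
 * the "+ 5" bias in the index and shift arithmetic accounts for.
 */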
static int get_port_type(u32 *sid, int port_index)
{
        int index, shift;

        index = (port_index + 5) / 8;
        shift = 16 - ((port_index + 5) & 7) * 2;
        return (sid[index] >> shift) & 0x03;
}

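/*
 * Allocate and initialize a node from the first self ID quadlet of a PHY.
 * GFP_ATOMIC is required because topology construction runs under
 * card->lock during bus reset handling.
 */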
static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
{
        struct fw_node *node;

        node = kzalloc(struct_size(node, ports, port_count), GFP_ATOMIC);
        if (node == NULL)
                return NULL;

        node->color = color;
        node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
        node->link_on = SELF_ID_LINK_ON(sid);
        node->phy_speed = SELF_ID_PHY_SPEED(sid);
        node->initiated_reset = SELF_ID_PHY_INITIATOR(sid);
        node->port_count = port_count;

        refcount_set(&node->ref_count, 1);
        INIT_LIST_HEAD(&node->link);

        return node;
}

/*
 * Compute the maximum hop count for this node and its children. The
 * maximum hop count is the maximum number of connections between any
 * two nodes in the subtree rooted at this node. We need this for
 * setting the gap count. As we build the tree bottom up in
 * build_tree() below, this is fairly easy to do: for each node we
 * maintain the max hop count and the max depth, i.e. the number of hops
 * to the furthest leaf. Computing the max hop count breaks down into
 * two cases: either the path goes through this node, in which case
 * the hop count is the sum of the two biggest child depths plus 2.
 * Or it could be the case that the max hop path is entirely
 * contained in a child tree, in which case the max hop count is just
 * the max hop count of this child.
 */
static void update_hop_count(struct fw_node *node)
{
        int depths[2] = { -1, -1 };
        int max_child_hops = 0;
        int i;

        for (i = 0; i < node->port_count; i++) {
                if (node->ports[i] == NULL)
                        continue;

                if (node->ports[i]->max_hops > max_child_hops)
                        max_child_hops = node->ports[i]->max_hops;

                if (node->ports[i]->max_depth > depths[0]) {
                        depths[1] = depths[0];
                        depths[0] = node->ports[i]->max_depth;
                } else if (node->ports[i]->max_depth > depths[1])
                        depths[1] = node->ports[i]->max_depth;
        }

        node->max_depth = depths[0] + 1;
        node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
}

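/* Map a list_head embedded in struct fw_node back to the node itself. */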
static inline struct fw_node *fw_node(struct list_head *l)
{
        return list_entry(l, struct fw_node, link);
}

/*
 * This function builds the tree representation of the topology given
 * by the self IDs from the latest bus reset. During the construction
 * of the tree, the function checks that the self IDs are valid and
 * internally consistent. On success, this function returns the
 * fw_node corresponding to the local card; otherwise it returns NULL.
 */
static struct fw_node *build_tree(struct fw_card *card,
                                  u32 *sid, int self_id_count)
{
        struct fw_node *node, *child, *local_node, *irm_node;
        struct list_head stack, *h;
        u32 *next_sid, *end, q;
        int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
        int gap_count;
        bool beta_repeaters_present;

        local_node = NULL;
        node = NULL;
        INIT_LIST_HEAD(&stack);
        stack_depth = 0;
        end = sid + self_id_count;
        phy_id = 0;
        irm_node = NULL;
        gap_count = SELF_ID_GAP_COUNT(*sid);
        beta_repeaters_present = false;

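        /*
         * Self IDs are processed in ascending PHY ID order, and a node's
         * children always appear before the node itself; the loop below
         * relies on this to build the tree bottom-up, parking completed
         * subtrees on a stack until their parent's self ID shows up.
         */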
        while (sid < end) {
                next_sid = count_ports(sid, &port_count, &child_port_count);

                if (next_sid == NULL) {
                        fw_err(card, "inconsistent extended self IDs\n");
                        return NULL;
                }

                q = *sid;
                if (phy_id != SELF_ID_PHY_ID(q)) {
                        fw_err(card, "PHY ID mismatch in self ID: %d != %d\n",
                               phy_id, SELF_ID_PHY_ID(q));
                        return NULL;
                }

                if (child_port_count > stack_depth) {
                        fw_err(card, "topology stack underflow\n");
                        return NULL;
                }

                /*
                 * Seek back from the top of our stack to find the
                 * start of the child nodes for this node.
                 */
                for (i = 0, h = &stack; i < child_port_count; i++)
                        h = h->prev;
                /*
                 * When the stack is empty, this yields an invalid value,
                 * but that pointer will never be dereferenced.
                 */
                child = fw_node(h);

                node = fw_node_create(q, port_count, card->color);
                if (node == NULL) {
                        fw_err(card, "out of memory while building topology\n");
                        return NULL;
                }

                if (phy_id == (card->node_id & 0x3f))
                        local_node = node;

                if (SELF_ID_CONTENDER(q))
                        irm_node = node;

                parent_count = 0;

                for (i = 0; i < port_count; i++) {
                        switch (get_port_type(sid, i)) {
                        case SELFID_PORT_PARENT:
                                /*
                                 * Who's your daddy? We don't know the
                                 * parent node at this time, so we
                                 * temporarily abuse node->color for
                                 * remembering the entry in the
                                 * node->ports array where the parent
                                 * node should be. Later, when we
                                 * handle the parent node, we fix up
                                 * the reference.
                                 */
                                parent_count++;
                                node->color = i;
                                break;

                        case SELFID_PORT_CHILD:
                                node->ports[i] = child;
                                /*
                                 * Fix up parent reference for this
                                 * child node.
                                 */
                                child->ports[child->color] = node;
                                child->color = card->color;
                                child = fw_node(child->link.next);
                                break;
                        }
                }

                /*
                 * Check that the node reports exactly one parent
                 * port, except for the root, which of course should
                 * have no parents.
                 */
                if ((next_sid == end && parent_count != 0) ||
                    (next_sid < end && parent_count != 1)) {
                        fw_err(card, "parent port inconsistency for node %d: "
                               "parent_count=%d\n", phy_id, parent_count);
                        return NULL;
                }

                /* Pop the child nodes off the stack and push the new node. */
                __list_del(h->prev, &stack);
                list_add_tail(&node->link, &stack);
                stack_depth += 1 - child_port_count;

                if (node->phy_speed == SCODE_BETA &&
                    parent_count + child_port_count > 1)
                        beta_repeaters_present = true;

                /*
                 * If PHYs report different gap counts, set an invalid count
                 * which will force a gap count reconfiguration and a reset.
                 */
                if (SELF_ID_GAP_COUNT(q) != gap_count)
                        gap_count = 0;

                update_hop_count(node);

                sid = next_sid;
                phy_id++;
        }

        card->root_node = node;
        card->irm_node = irm_node;
        card->gap_count = gap_count;
        card->beta_repeaters_present = beta_repeaters_present;

        return local_node;
}

typedef void (*fw_node_callback_t)(struct fw_card *card,
                                   struct fw_node *node,
                                   struct fw_node *parent);

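/*
 * Breadth-first traversal of the tree rooted at @root, calling @callback on
 * each node together with its parent (NULL for @root).  node->color marks
 * visited nodes so that the already-handled parent can be told apart from
 * the children of a node; every queued node holds an extra reference that
 * is dropped once the traversal is complete.
 */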
static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
                             fw_node_callback_t callback)
{
        struct list_head list;
        struct fw_node *node, *next, *child, *parent;
        int i;

        INIT_LIST_HEAD(&list);

        fw_node_get(root);
        list_add_tail(&root->link, &list);
        parent = NULL;
        list_for_each_entry(node, &list, link) {
                node->color = card->color;

                for (i = 0; i < node->port_count; i++) {
                        child = node->ports[i];
                        if (!child)
                                continue;
                        if (child->color == card->color)
                                parent = child;
                        else {
                                fw_node_get(child);
                                list_add_tail(&child->link, &list);
                        }
                }

                callback(card, node, parent);
        }

        list_for_each_entry_safe(node, next, &list, link)
                fw_node_put(node);
}

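/*
 * Callback for for_each_fw_node(): report a node that has disappeared from
 * the topology and drop the tree's reference to it.
 */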
static void report_lost_node(struct fw_card *card,
                             struct fw_node *node, struct fw_node *parent)
{
        fw_node_event(card, node, FW_NODE_DESTROYED);
        fw_node_put(node);

        /* Topology has changed - reset bus manager retry counter */
        card->bm_retries = 0;
}

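/*
 * Callback for for_each_fw_node(): announce a newly found node.  The node's
 * usable speed is the minimum of its own PHY speed and the parent's
 * max_speed, and b_path stays true only as long as every node on the path
 * from the traversal root runs in beta mode.
 */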
static void report_found_node(struct fw_card *card,
                              struct fw_node *node, struct fw_node *parent)
{
        int b_path = (node->phy_speed == SCODE_BETA);

        if (parent != NULL) {
                /* min() macro doesn't work here with gcc 3.4 */
                node->max_speed = parent->max_speed < node->phy_speed ?
                                  parent->max_speed : node->phy_speed;
                node->b_path = parent->b_path && b_path;
        } else {
                node->max_speed = node->phy_speed;
                node->b_path = b_path;
        }

        fw_node_event(card, node, FW_NODE_CREATED);

        /* Topology has changed - reset bus manager retry counter */
        card->bm_retries = 0;
}

/* Must be called with card->lock held */
void fw_destroy_nodes(struct fw_card *card)
{
        card->color++;
        if (card->local_node != NULL)
                for_each_fw_node(card, card->local_node, report_lost_node);
        card->local_node = NULL;
}

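/*
 * Reattach the subtree that hangs off @port of @node1 to the same port of
 * @node0 and fix up the subtree root's back pointer to its new parent.
 */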
static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
{
        struct fw_node *tree;
        int i;

        tree = node1->ports[port];
        node0->ports[port] = tree;
        for (i = 0; i < tree->port_count; i++) {
                if (tree->ports[i] == node1) {
                        tree->ports[i] = node0;
                        break;
                }
        }
}

/*
 * Compare the old topology tree for card with the new one specified by root.
 * Queue the nodes and mark them as either found, lost or updated.
 * Update the nodes in the card topology tree as we go.
 */
static void update_tree(struct fw_card *card, struct fw_node *root)
{
        struct list_head list0, list1;
        struct fw_node *node0, *node1, *next1;
        int i, event;

        INIT_LIST_HEAD(&list0);
        list_add_tail(&card->local_node->link, &list0);
        INIT_LIST_HEAD(&list1);
        list_add_tail(&root->link, &list1);

        node0 = fw_node(list0.next);
        node1 = fw_node(list1.next);

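        /*
         * Walk the old tree (list0) and the new tree (list1) in lockstep,
         * comparing corresponding nodes, emitting an event for each
         * difference, and queuing ports that are populated in both trees
         * for the next round.
         */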
        while (&node0->link != &list0) {
                WARN_ON(node0->port_count != node1->port_count);

                if (node0->link_on && !node1->link_on)
                        event = FW_NODE_LINK_OFF;
                else if (!node0->link_on && node1->link_on)
                        event = FW_NODE_LINK_ON;
                else if (node1->initiated_reset && node1->link_on)
                        event = FW_NODE_INITIATED_RESET;
                else
                        event = FW_NODE_UPDATED;

                node0->node_id = node1->node_id;
                node0->color = card->color;
                node0->link_on = node1->link_on;
                node0->initiated_reset = node1->initiated_reset;
                node0->max_hops = node1->max_hops;
                node1->color = card->color;
                fw_node_event(card, node0, event);

                if (card->root_node == node1)
                        card->root_node = node0;
                if (card->irm_node == node1)
                        card->irm_node = node0;

                for (i = 0; i < node0->port_count; i++) {
                        if (node0->ports[i] && node1->ports[i]) {
                                /*
                                 * This port didn't change, queue the
                                 * connected node for further
                                 * investigation.
                                 */
                                if (node0->ports[i]->color == card->color)
                                        continue;
                                list_add_tail(&node0->ports[i]->link, &list0);
                                list_add_tail(&node1->ports[i]->link, &list1);
                        } else if (node0->ports[i]) {
                                /*
                                 * The nodes connected here were
                                 * unplugged; unref the lost nodes and
                                 * queue FW_NODE_LOST callbacks for
                                 * them.
                                 */

                                for_each_fw_node(card, node0->ports[i],
                                                 report_lost_node);
                                node0->ports[i] = NULL;
                        } else if (node1->ports[i]) {
                                /*
                                 * One or more nodes were connected to
                                 * this port. Move the new nodes into
                                 * the tree and queue FW_NODE_CREATED
                                 * callbacks for them.
                                 */
                                move_tree(node0, node1, i);
                                for_each_fw_node(card, node0->ports[i],
                                                 report_found_node);
                        }
                }

                node0 = fw_node(node0->link.next);
                next1 = fw_node(node1->link.next);
                fw_node_put(node1);
                node1 = next1;
        }
}

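/*
 * Regenerate the TOPOLOGY_MAP CSR block: a length|CRC header quadlet, a
 * generation count that is incremented on every update, a quadlet holding
 * node_count and self_id_count, and then the raw self ID quadlets.  The
 * CRC in the header is filled in by fw_compute_block_crc().
 */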
static void update_topology_map(struct fw_card *card,
                                u32 *self_ids, int self_id_count)
{
        int node_count = (card->root_node->node_id & 0x3f) + 1;
        __be32 *map = card->topology_map;

        *map++ = cpu_to_be32((self_id_count + 2) << 16);
        *map++ = cpu_to_be32(be32_to_cpu(card->topology_map[1]) + 1);
        *map++ = cpu_to_be32((node_count << 16) | self_id_count);

        while (self_id_count--)
                *map++ = cpu_to_be32p(self_ids++);

        fw_compute_block_crc(card->topology_map);
}

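/*
 * Called by the low-level card driver once the self ID buffer for the new
 * bus generation has been received.  Under card->lock, this updates the
 * card's node ID, generation and bus manager bookkeeping, rebuilds the
 * topology tree and the topology map, and emits FW_NODE_* events for every
 * node that appeared, disappeared or changed.
 */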
void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
                              int self_id_count, u32 *self_ids, bool bm_abdicate)
{
        struct fw_node *local_node;
        unsigned long flags;

        spin_lock_irqsave(&card->lock, flags);

        /*
         * If the selfID buffer is not the immediate successor of the
         * previously processed one, we cannot reliably compare the
         * old and new topologies.
         */
        if (!is_next_generation(generation, card->generation) &&
            card->local_node != NULL) {
                fw_destroy_nodes(card);
                card->bm_retries = 0;
        }

        card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
        card->node_id = node_id;
        /*
         * Update node_id before generation to prevent anybody from using
         * a stale node_id together with a current generation.
         */
        smp_wmb();
        card->generation = generation;
        card->reset_jiffies = get_jiffies_64();
        card->bm_node_id = 0xffff;
        card->bm_abdicate = bm_abdicate;
        fw_schedule_bm_work(card, 0);

        local_node = build_tree(card, self_ids, self_id_count);

        update_topology_map(card, self_ids, self_id_count);

        card->color++;

        if (local_node == NULL) {
                fw_err(card, "topology build failed\n");
                /* FIXME: We need to issue a bus reset in this case. */
        } else if (card->local_node == NULL) {
                card->local_node = local_node;
                for_each_fw_node(card, local_node, report_found_node);
        } else {
                update_tree(card, local_node);
        }

        spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL(fw_core_handle_bus_reset);