1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Incremental bus scan, based on bus topology
5 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
9 #include <linux/errno.h>
10 #include <linux/firewire.h>
11 #include <linux/firewire-constants.h>
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/list.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/spinlock.h>
19 #include <linux/atomic.h>
20 #include <asm/byteorder.h>
/* Field accessors for the quadlets of IEEE 1394 self ID packets. */
#define SELF_ID_PHY_ID(q) (((q) >> 24) & 0x3f) /* physical ID of the sending PHY */
#define SELF_ID_EXTENDED(q) (((q) >> 23) & 0x01) /* set in extended self ID packets */
#define SELF_ID_LINK_ON(q) (((q) >> 22) & 0x01) /* link layer is powered/active */
#define SELF_ID_GAP_COUNT(q) (((q) >> 16) & 0x3f) /* PHY's current gap count */
#define SELF_ID_PHY_SPEED(q) (((q) >> 14) & 0x03) /* speed capability code */
#define SELF_ID_CONTENDER(q) (((q) >> 11) & 0x01) /* contending for the IRM role */
#define SELF_ID_PHY_INITIATOR(q) (((q) >> 1) & 0x01) /* this PHY initiated the bus reset */
#define SELF_ID_MORE_PACKETS(q) (((q) >> 0) & 0x01) /* further self ID packets follow */

/* Sequence number field of an extended self ID packet. */
#define SELF_ID_EXT_SEQUENCE(q) (((q) >> 20) & 0x07)

/* Two-bit per-port status codes reported in self ID packets. */
#define SELFID_PORT_CHILD 0x3 /* connected to a child node */
#define SELFID_PORT_PARENT 0x2 /* connected to the parent node */
#define SELFID_PORT_NCONN 0x1 /* present but not connected */
#define SELFID_PORT_NONE 0x0 /* port not present */
40 static u32
*count_ports(u32
*sid
, int *total_port_count
, int *child_port_count
)
43 int port_type
, shift
, seq
;
45 *total_port_count
= 0;
46 *child_port_count
= 0;
53 port_type
= (q
>> shift
) & 0x03;
55 case SELFID_PORT_CHILD
:
56 (*child_port_count
)++;
58 case SELFID_PORT_PARENT
:
59 case SELFID_PORT_NCONN
:
60 (*total_port_count
)++;
62 case SELFID_PORT_NONE
:
68 if (!SELF_ID_MORE_PACKETS(q
))
76 * Check that the extra packets actually are
77 * extended self ID packets and that the
78 * sequence numbers in the extended self ID
79 * packets increase as expected.
82 if (!SELF_ID_EXTENDED(q
) ||
83 seq
!= SELF_ID_EXT_SEQUENCE(q
))
/*
 * Extract the two-bit status code of port @port_index from a node's
 * self ID packets.  Ports 0-2 live in the base packet; higher ports
 * are packed eight per extended packet.  Biasing the index by 5 maps
 * both cases onto the same quadlet/shift arithmetic.
 */
static int get_port_type(u32 *sid, int port_index)
{
	int slot = port_index + 5;

	return (sid[slot / 8] >> (16 - (slot & 7) * 2)) & 0x03;
}
100 static struct fw_node
*fw_node_create(u32 sid
, int port_count
, int color
)
102 struct fw_node
*node
;
104 node
= kzalloc(struct_size(node
, ports
, port_count
), GFP_ATOMIC
);
109 node
->node_id
= LOCAL_BUS
| SELF_ID_PHY_ID(sid
);
110 node
->link_on
= SELF_ID_LINK_ON(sid
);
111 node
->phy_speed
= SELF_ID_PHY_SPEED(sid
);
112 node
->initiated_reset
= SELF_ID_PHY_INITIATOR(sid
);
113 node
->port_count
= port_count
;
115 refcount_set(&node
->ref_count
, 1);
116 INIT_LIST_HEAD(&node
->link
);
122 * Compute the maximum hop count for this node and it's children. The
123 * maximum hop count is the maximum number of connections between any
124 * two nodes in the subtree rooted at this node. We need this for
125 * setting the gap count. As we build the tree bottom up in
126 * build_tree() below, this is fairly easy to do: for each node we
127 * maintain the max hop count and the max depth, ie the number of hops
128 * to the furthest leaf. Computing the max hop count breaks down into
129 * two cases: either the path goes through this node, in which case
130 * the hop count is the sum of the two biggest child depths plus 2.
131 * Or it could be the case that the max hop path is entirely
132 * containted in a child tree, in which case the max hop count is just
133 * the max hop count of this child.
135 static void update_hop_count(struct fw_node
*node
)
137 int depths
[2] = { -1, -1 };
138 int max_child_hops
= 0;
141 for (i
= 0; i
< node
->port_count
; i
++) {
142 if (node
->ports
[i
] == NULL
)
145 if (node
->ports
[i
]->max_hops
> max_child_hops
)
146 max_child_hops
= node
->ports
[i
]->max_hops
;
148 if (node
->ports
[i
]->max_depth
> depths
[0]) {
149 depths
[1] = depths
[0];
150 depths
[0] = node
->ports
[i
]->max_depth
;
151 } else if (node
->ports
[i
]->max_depth
> depths
[1])
152 depths
[1] = node
->ports
[i
]->max_depth
;
155 node
->max_depth
= depths
[0] + 1;
156 node
->max_hops
= max(max_child_hops
, depths
[0] + depths
[1] + 2);
159 static inline struct fw_node
*fw_node(struct list_head
*l
)
161 return list_entry(l
, struct fw_node
, link
);
165 * This function builds the tree representation of the topology given
166 * by the self IDs from the latest bus reset. During the construction
167 * of the tree, the function checks that the self IDs are valid and
168 * internally consistent. On success this function returns the
169 * fw_node corresponding to the local card otherwise NULL.
171 static struct fw_node
*build_tree(struct fw_card
*card
,
172 u32
*sid
, int self_id_count
)
174 struct fw_node
*node
, *child
, *local_node
, *irm_node
;
175 struct list_head stack
, *h
;
176 u32
*next_sid
, *end
, q
;
177 int i
, port_count
, child_port_count
, phy_id
, parent_count
, stack_depth
;
179 bool beta_repeaters_present
;
183 INIT_LIST_HEAD(&stack
);
185 end
= sid
+ self_id_count
;
188 gap_count
= SELF_ID_GAP_COUNT(*sid
);
189 beta_repeaters_present
= false;
192 next_sid
= count_ports(sid
, &port_count
, &child_port_count
);
194 if (next_sid
== NULL
) {
195 fw_err(card
, "inconsistent extended self IDs\n");
200 if (phy_id
!= SELF_ID_PHY_ID(q
)) {
201 fw_err(card
, "PHY ID mismatch in self ID: %d != %d\n",
202 phy_id
, SELF_ID_PHY_ID(q
));
206 if (child_port_count
> stack_depth
) {
207 fw_err(card
, "topology stack underflow\n");
212 * Seek back from the top of our stack to find the
213 * start of the child nodes for this node.
215 for (i
= 0, h
= &stack
; i
< child_port_count
; i
++)
218 * When the stack is empty, this yields an invalid value,
219 * but that pointer will never be dereferenced.
223 node
= fw_node_create(q
, port_count
, card
->color
);
225 fw_err(card
, "out of memory while building topology\n");
229 if (phy_id
== (card
->node_id
& 0x3f))
232 if (SELF_ID_CONTENDER(q
))
237 for (i
= 0; i
< port_count
; i
++) {
238 switch (get_port_type(sid
, i
)) {
239 case SELFID_PORT_PARENT
:
241 * Who's your daddy? We dont know the
242 * parent node at this time, so we
243 * temporarily abuse node->color for
244 * remembering the entry in the
245 * node->ports array where the parent
246 * node should be. Later, when we
247 * handle the parent node, we fix up
254 case SELFID_PORT_CHILD
:
255 node
->ports
[i
] = child
;
257 * Fix up parent reference for this
260 child
->ports
[child
->color
] = node
;
261 child
->color
= card
->color
;
262 child
= fw_node(child
->link
.next
);
268 * Check that the node reports exactly one parent
269 * port, except for the root, which of course should
272 if ((next_sid
== end
&& parent_count
!= 0) ||
273 (next_sid
< end
&& parent_count
!= 1)) {
274 fw_err(card
, "parent port inconsistency for node %d: "
275 "parent_count=%d\n", phy_id
, parent_count
);
279 /* Pop the child nodes off the stack and push the new node. */
280 __list_del(h
->prev
, &stack
);
281 list_add_tail(&node
->link
, &stack
);
282 stack_depth
+= 1 - child_port_count
;
284 if (node
->phy_speed
== SCODE_BETA
&&
285 parent_count
+ child_port_count
> 1)
286 beta_repeaters_present
= true;
289 * If PHYs report different gap counts, set an invalid count
290 * which will force a gap count reconfiguration and a reset.
292 if (SELF_ID_GAP_COUNT(q
) != gap_count
)
295 update_hop_count(node
);
301 card
->root_node
= node
;
302 card
->irm_node
= irm_node
;
303 card
->gap_count
= gap_count
;
304 card
->beta_repeaters_present
= beta_repeaters_present
;
309 typedef void (*fw_node_callback_t
)(struct fw_card
* card
,
310 struct fw_node
* node
,
311 struct fw_node
* parent
);
313 static void for_each_fw_node(struct fw_card
*card
, struct fw_node
*root
,
314 fw_node_callback_t callback
)
316 struct list_head list
;
317 struct fw_node
*node
, *next
, *child
, *parent
;
320 INIT_LIST_HEAD(&list
);
323 list_add_tail(&root
->link
, &list
);
325 list_for_each_entry(node
, &list
, link
) {
326 node
->color
= card
->color
;
328 for (i
= 0; i
< node
->port_count
; i
++) {
329 child
= node
->ports
[i
];
332 if (child
->color
== card
->color
)
336 list_add_tail(&child
->link
, &list
);
340 callback(card
, node
, parent
);
343 list_for_each_entry_safe(node
, next
, &list
, link
)
347 static void report_lost_node(struct fw_card
*card
,
348 struct fw_node
*node
, struct fw_node
*parent
)
350 fw_node_event(card
, node
, FW_NODE_DESTROYED
);
353 /* Topology has changed - reset bus manager retry counter */
354 card
->bm_retries
= 0;
357 static void report_found_node(struct fw_card
*card
,
358 struct fw_node
*node
, struct fw_node
*parent
)
360 int b_path
= (node
->phy_speed
== SCODE_BETA
);
362 if (parent
!= NULL
) {
363 /* min() macro doesn't work here with gcc 3.4 */
364 node
->max_speed
= parent
->max_speed
< node
->phy_speed
?
365 parent
->max_speed
: node
->phy_speed
;
366 node
->b_path
= parent
->b_path
&& b_path
;
368 node
->max_speed
= node
->phy_speed
;
369 node
->b_path
= b_path
;
372 fw_node_event(card
, node
, FW_NODE_CREATED
);
374 /* Topology has changed - reset bus manager retry counter */
375 card
->bm_retries
= 0;
378 /* Must be called with card->lock held */
379 void fw_destroy_nodes(struct fw_card
*card
)
382 if (card
->local_node
!= NULL
)
383 for_each_fw_node(card
, card
->local_node
, report_lost_node
);
384 card
->local_node
= NULL
;
387 static void move_tree(struct fw_node
*node0
, struct fw_node
*node1
, int port
)
389 struct fw_node
*tree
;
392 tree
= node1
->ports
[port
];
393 node0
->ports
[port
] = tree
;
394 for (i
= 0; i
< tree
->port_count
; i
++) {
395 if (tree
->ports
[i
] == node1
) {
396 tree
->ports
[i
] = node0
;
403 * Compare the old topology tree for card with the new one specified by root.
404 * Queue the nodes and mark them as either found, lost or updated.
405 * Update the nodes in the card topology tree as we go.
407 static void update_tree(struct fw_card
*card
, struct fw_node
*root
)
409 struct list_head list0
, list1
;
410 struct fw_node
*node0
, *node1
, *next1
;
413 INIT_LIST_HEAD(&list0
);
414 list_add_tail(&card
->local_node
->link
, &list0
);
415 INIT_LIST_HEAD(&list1
);
416 list_add_tail(&root
->link
, &list1
);
418 node0
= fw_node(list0
.next
);
419 node1
= fw_node(list1
.next
);
421 while (&node0
->link
!= &list0
) {
422 WARN_ON(node0
->port_count
!= node1
->port_count
);
424 if (node0
->link_on
&& !node1
->link_on
)
425 event
= FW_NODE_LINK_OFF
;
426 else if (!node0
->link_on
&& node1
->link_on
)
427 event
= FW_NODE_LINK_ON
;
428 else if (node1
->initiated_reset
&& node1
->link_on
)
429 event
= FW_NODE_INITIATED_RESET
;
431 event
= FW_NODE_UPDATED
;
433 node0
->node_id
= node1
->node_id
;
434 node0
->color
= card
->color
;
435 node0
->link_on
= node1
->link_on
;
436 node0
->initiated_reset
= node1
->initiated_reset
;
437 node0
->max_hops
= node1
->max_hops
;
438 node1
->color
= card
->color
;
439 fw_node_event(card
, node0
, event
);
441 if (card
->root_node
== node1
)
442 card
->root_node
= node0
;
443 if (card
->irm_node
== node1
)
444 card
->irm_node
= node0
;
446 for (i
= 0; i
< node0
->port_count
; i
++) {
447 if (node0
->ports
[i
] && node1
->ports
[i
]) {
449 * This port didn't change, queue the
450 * connected node for further
453 if (node0
->ports
[i
]->color
== card
->color
)
455 list_add_tail(&node0
->ports
[i
]->link
, &list0
);
456 list_add_tail(&node1
->ports
[i
]->link
, &list1
);
457 } else if (node0
->ports
[i
]) {
459 * The nodes connected here were
460 * unplugged; unref the lost nodes and
461 * queue FW_NODE_LOST callbacks for
465 for_each_fw_node(card
, node0
->ports
[i
],
467 node0
->ports
[i
] = NULL
;
468 } else if (node1
->ports
[i
]) {
470 * One or more node were connected to
471 * this port. Move the new nodes into
472 * the tree and queue FW_NODE_CREATED
473 * callbacks for them.
475 move_tree(node0
, node1
, i
);
476 for_each_fw_node(card
, node0
->ports
[i
],
481 node0
= fw_node(node0
->link
.next
);
482 next1
= fw_node(node1
->link
.next
);
488 static void update_topology_map(struct fw_card
*card
,
489 u32
*self_ids
, int self_id_count
)
491 int node_count
= (card
->root_node
->node_id
& 0x3f) + 1;
492 __be32
*map
= card
->topology_map
;
494 *map
++ = cpu_to_be32((self_id_count
+ 2) << 16);
495 *map
++ = cpu_to_be32(be32_to_cpu(card
->topology_map
[1]) + 1);
496 *map
++ = cpu_to_be32((node_count
<< 16) | self_id_count
);
498 while (self_id_count
--)
499 *map
++ = cpu_to_be32p(self_ids
++);
501 fw_compute_block_crc(card
->topology_map
);
504 void fw_core_handle_bus_reset(struct fw_card
*card
, int node_id
, int generation
,
505 int self_id_count
, u32
*self_ids
, bool bm_abdicate
)
507 struct fw_node
*local_node
;
510 spin_lock_irqsave(&card
->lock
, flags
);
513 * If the selfID buffer is not the immediate successor of the
514 * previously processed one, we cannot reliably compare the
515 * old and new topologies.
517 if (!is_next_generation(generation
, card
->generation
) &&
518 card
->local_node
!= NULL
) {
519 fw_destroy_nodes(card
);
520 card
->bm_retries
= 0;
523 card
->broadcast_channel_allocated
= card
->broadcast_channel_auto_allocated
;
524 card
->node_id
= node_id
;
526 * Update node_id before generation to prevent anybody from using
527 * a stale node_id together with a current generation.
530 card
->generation
= generation
;
531 card
->reset_jiffies
= get_jiffies_64();
532 card
->bm_node_id
= 0xffff;
533 card
->bm_abdicate
= bm_abdicate
;
534 fw_schedule_bm_work(card
, 0);
536 local_node
= build_tree(card
, self_ids
, self_id_count
);
538 update_topology_map(card
, self_ids
, self_id_count
);
542 if (local_node
== NULL
) {
543 fw_err(card
, "topology build failed\n");
544 /* FIXME: We need to issue a bus reset in this case. */
545 } else if (card
->local_node
== NULL
) {
546 card
->local_node
= local_node
;
547 for_each_fw_node(card
, local_node
, report_found_node
);
549 update_tree(card
, local_node
);
552 spin_unlock_irqrestore(&card
->lock
, flags
);
554 EXPORT_SYMBOL(fw_core_handle_bus_reset
);