--- /dev/null
+Test a fenced node where an admin removed all services after fence start but
+before fencing succeeded. This shouldn't keep the node in "fence" state
+forever.
--- /dev/null
+[
+ [ "power node1 on", "power node2 on", "power node3 on"],
+ ["service vm:103 add node3 stopped"], ["service vm:103 started"]
+]
--- /dev/null
+{
+ "node1": { "power": "off", "network": "off" },
+ "node2": { "power": "off", "network": "off" },
+ "node3": { "power": "off", "network": "off" }
+}
--- /dev/null
+info 0 hardware: starting simulation
+info 20 cmdlist: execute power node1 on
+info 20 node1/crm: status change startup => wait_for_quorum
+info 20 node1/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node2 on
+info 20 node2/crm: status change startup => wait_for_quorum
+info 20 node2/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node3 on
+info 20 node3/crm: status change startup => wait_for_quorum
+info 20 node3/lrm: status change startup => wait_for_agent_lock
+info 20 node1/crm: got lock 'ha_manager_lock'
+info 20 node1/crm: status change wait_for_quorum => master
+info 21 node1/lrm: got lock 'ha_agent_node1_lock'
+info 21 node1/lrm: status change wait_for_agent_lock => active
+info 21 node1/lrm: starting service vm:101
+info 21 node1/lrm: service status vm:101 started
+info 22 node2/crm: status change wait_for_quorum => slave
+info 24 node3/crm: status change wait_for_quorum => slave
+info 120 cmdlist: execute service vm:103 add node3 stopped
+info 120 node1/crm: adding new service 'vm:103' on node 'node3'
+info 125 node3/lrm: got lock 'ha_agent_node3_lock'
+info 125 node3/lrm: status change wait_for_agent_lock => active
+info 140 node1/crm: service 'vm:103': state changed from 'request_stop' to 'stopped'
+info 220 cmdlist: execute service vm:103 started
+info 220 node1/crm: service 'vm:103': state changed from 'stopped' to 'started' (node = node3)
+info 225 node3/lrm: starting service vm:103
+info 225 node3/lrm: service status vm:103 started
+info 820 hardware: exit simulation - done
--- /dev/null
+{
+ "timestamp": 100,
+ "master_node": "node1",
+ "service_status": {
+ "vm:101": {"state": "started", "node": "node1", "uid": "0StZls8UGuAhEGuKm7xNhA", "running": 1},
+ "vm:102": {"state": "stopped", "node": "node2", "uid": "47mrPA7fNXjAyaN5n9IEJg"}
+ },
+ "node_status": {
+ "node1": "online",
+ "node2": "online",
+ "node3": "fence"
+ }
+}
--- /dev/null
+{
+ "vm:101": { "node": "node1", "state": "enabled" },
+ "vm:102": { "node": "node2" }
+}
--- /dev/null
+Test a fenced node where an admin removed all services after fence start but
+before fencing succeeded. This shouldn't keep the node in "fence" state
+forever.
--- /dev/null
+[
+ [
+ "power node1 on", "power node2 on", "power node3 on",
+ "skip-round crm 2",
+ "service vm:103 started"
+ ]
+]
--- /dev/null
+{
+ "node1": { "power": "off", "network": "off" },
+ "node2": { "power": "off", "network": "off" },
+ "node3": { "power": "off", "network": "off" }
+}
--- /dev/null
+info 0 hardware: starting simulation
+info 20 cmdlist: execute power node1 on
+info 20 node1/crm: status change startup => wait_for_quorum
+info 20 node1/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node2 on
+info 20 node2/crm: status change startup => wait_for_quorum
+info 20 node2/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute power node3 on
+info 20 node3/crm: status change startup => wait_for_quorum
+info 20 node3/lrm: status change startup => wait_for_agent_lock
+info 20 cmdlist: execute skip-round crm 2
+info 20 cmdlist: execute service vm:103 started
+info 20 run-loop: skipping CRM round
+info 20 node1/lrm: got lock 'ha_agent_node1_lock'
+info 20 node1/lrm: status change wait_for_agent_lock => active
+info 20 node1/lrm: starting service vm:101
+info 20 node1/lrm: service status vm:101 started
+info 22 node3/lrm: got lock 'ha_agent_node3_lock'
+info 22 node3/lrm: status change wait_for_agent_lock => active
+info 22 node3/lrm: starting service vm:103
+info 22 node3/lrm: service status vm:103 started
+info 40 run-loop: skipping CRM round
+info 60 node1/crm: got lock 'ha_manager_lock'
+info 60 node1/crm: status change wait_for_quorum => master
+info 62 node2/crm: status change wait_for_quorum => slave
+info 64 node3/crm: status change wait_for_quorum => slave
+info 620 hardware: exit simulation - done
--- /dev/null
+{
+ "timestamp": 100,
+ "master_node": "node1",
+ "service_status": {
+ "vm:101": {"state": "started", "node": "node1", "uid": "0StZls8UGuAhEGuKm7xNhA", "running": 1},
+ "vm:102": {"state": "stopped", "node": "node2", "uid": "47mrPA7fNXjAyaN5n9IEJg"},
+ "vm:103": {"state": "started", "node": "node3", "uid": "47mrPA7fNXjAyaN5n9IEJa"}
+ },
+ "node_status": {
+ "node1": "online",
+ "node2": "online",
+ "node3": "fence"
+ }
+}
--- /dev/null
+{
+ "vm:101": { "node": "node1", "state": "enabled" },
+ "vm:102": { "node": "node2" },
+ "vm:103": { "node": "node3", "state": "enabled" }
+}